Merge tag 'acpi-4.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 30 Nov 2017 23:49:50 +0000 (18:49 -0500)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 30 Nov 2017 23:49:50 +0000 (18:49 -0500)
Pull ACPI fixes from Rafael Wysocki:
 "These fix a regression related to the ACPI EC handling during system
  suspend/resume on some platforms and prevent modalias from being
  exposed to user space for ACPI device objects with "not functional and
  not present" status.

  Specifics:

   - Fix an ACPI EC driver regression (from the 4.9 cycle) causing the
     driver's power management operations to be omitted during system
     suspend/resume on platforms where the EC instance from the ECDT
     table is used instead of the one from the DSDT (Lv Zheng).

   - Prevent modalias from being exposed to user space for ACPI device
     objects with _STA returning 0 (not present and not functional) to
     prevent driver modules from being loaded automatically for hardware
     that is not actually present on some platforms (Hans de Goede)"

* tag 'acpi-4.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / EC: Fix regression related to PM ops support in ECDT device
  ACPI / bus: Leave modalias empty for devices which are not present
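
For context on the second fix: it amounts to not generating a modalias for an ACPI
device object whose _STA evaluates to 0, i.e. a device that is neither present nor
functional, so udev is never prompted to auto-load a driver module for it. The sketch
below is illustrative only and is not the merged patch; the helper name
acpi_device_wants_modalias is a hypothetical stand-in, while struct acpi_device and
its status bitfields are existing kernel definitions.

#include <linux/acpi.h>	/* struct acpi_device and its status bitfields */

/*
 * Illustrative helper (hypothetical name, not from the actual patch):
 * decide whether an ACPI device object should expose a modalias at all.
 * A device whose _STA returned 0 is neither present (bit 0) nor
 * functional (bit 3), so exposing a modalias would only invite automatic
 * module loading for hardware that is not actually there.
 */
static bool acpi_device_wants_modalias(struct acpi_device *adev)
{
	return adev->status.present || adev->status.functional;
}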

1852 files changed:
.gitignore
Documentation/ABI/testing/dell-smbios-wmi [new file with mode: 0644]
Documentation/ABI/testing/sysfs-platform-dell-smbios [new file with mode: 0644]
Documentation/ABI/testing/sysfs-platform-intel-wmi-thunderbolt [new file with mode: 0644]
Documentation/admin-guide/dynamic-debug-howto.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/thunderbolt.rst
Documentation/clearing-warn-once.txt [new file with mode: 0644]
Documentation/core-api/local_ops.rst
Documentation/dev-tools/coccinelle.rst
Documentation/dev-tools/kcov.rst
Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,bdpsys.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,hifsys.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
Documentation/devicetree/bindings/clock/clk-exynos-audss.txt
Documentation/devicetree/bindings/clock/exynos4-clock.txt
Documentation/devicetree/bindings/clock/exynos5433-clock.txt
Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt
Documentation/devicetree/bindings/display/google,goldfish-fb.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
Documentation/devicetree/bindings/mtd/denali-nand.txt
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
Documentation/devicetree/bindings/mtd/mtk-quadspi.txt
Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
Documentation/devicetree/bindings/rtc/imxdi-rtc.txt
Documentation/devicetree/bindings/rtc/pcf85363.txt [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/rtc-mt7622.txt [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
Documentation/devicetree/bindings/trivial-devices.txt
Documentation/devicetree/bindings/usb/usb-device.txt
Documentation/driver-model/devres.txt
Documentation/filesystems/proc.txt
Documentation/ia64/xen.txt
Documentation/printk-formats.txt
Documentation/process/5.Posting.rst
Documentation/scheduler/sched-deadline.txt
Documentation/security/keys/core.rst
Documentation/svga.txt
Documentation/switchtec.txt
Documentation/sysctl/vm.txt
Documentation/translations/ko_KR/memory-barriers.txt
Documentation/virtual/kvm/devices/arm-vgic-its.txt
Documentation/x86/protection-keys.txt
Kbuild
MAINTAINERS
Makefile
arch/alpha/kernel/srmcons.c
arch/arc/Kconfig
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/include/asm/arcregs.h
arch/arc/kernel/perf_event.c
arch/arc/kernel/setup.c
arch/arc/mm/tlb.c
arch/arc/plat-axs10x/Kconfig
arch/arc/plat-axs10x/axs10x.c
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/include/asm/assembler.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/kernel/entry-header.S
arch/arm/kvm/Kconfig
arch/arm/kvm/Makefile
arch/arm/mach-iop32x/n2100.c
arch/arm/mach-ixp4xx/dsmg600-setup.c
arch/arm/mach-ixp4xx/nas100d-setup.c
arch/arm/mach-orion5x/db88f5281-setup.c
arch/arm/mach-pxa/cm-x255.c
arch/arm/mach-uniphier/Makefile
arch/arm/mm/dump.c
arch/arm/mm/init.c
arch/arm64/include/asm/pgtable.h
arch/arm64/kvm/Kconfig
arch/arm64/kvm/Makefile
arch/blackfin/kernel/nmi.c
arch/c6x/Makefile
arch/frv/kernel/.gitignore [new file with mode: 0644]
arch/hexagon/Makefile
arch/hexagon/kernel/ptrace.c
arch/ia64/include/asm/topology.h
arch/ia64/kernel/asm-offsets.c
arch/m68k/amiga/amisound.c
arch/m68k/mac/macboing.c
arch/microblaze/include/asm/mmu_context_mm.h
arch/mips/boot/dts/brcm/Makefile
arch/mips/boot/dts/cavium-octeon/Makefile
arch/mips/boot/dts/img/Makefile
arch/mips/boot/dts/ingenic/Makefile
arch/mips/boot/dts/lantiq/Makefile
arch/mips/boot/dts/mti/Makefile
arch/mips/boot/dts/netlogic/Makefile
arch/mips/boot/dts/ni/Makefile
arch/mips/boot/dts/pic32/Makefile
arch/mips/boot/dts/qca/Makefile
arch/mips/boot/dts/ralink/Makefile
arch/mips/boot/dts/xilfpga/Makefile
arch/mips/include/asm/pgtable.h
arch/mips/kvm/mips.c
arch/mips/lasat/picvue_proc.c
arch/mips/mti-malta/malta-display.c
arch/mn10300/mm/fault.c
arch/parisc/kernel/pdc_cons.c
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/imc-pmu.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/tau_6xx.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/lib/code-patching.c
arch/powerpc/mm/slice.c
arch/powerpc/oprofile/op_model_cell.c
arch/powerpc/perf/imc-pmu.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/powermac/low_i2c.c
arch/powerpc/platforms/powernv/opal-imc.c
arch/powerpc/platforms/powernv/vas.c
arch/s390/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/appldata/appldata_mem.c
arch/s390/appldata/appldata_net_sum.c
arch/s390/appldata/appldata_os.c
arch/s390/boot/install.sh
arch/s390/crypto/aes_s390.c
arch/s390/crypto/arch_random.c
arch/s390/crypto/crc32-vx.c
arch/s390/crypto/des_s390.c
arch/s390/crypto/ghash_s390.c
arch/s390/crypto/paes_s390.c
arch/s390/crypto/prng.c
arch/s390/crypto/sha.h
arch/s390/crypto/sha256_s390.c
arch/s390/crypto/sha512_s390.c
arch/s390/crypto/sha_common.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/kprobes.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/kvm_para.h
arch/s390/include/asm/livepatch.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/sysinfo.h
arch/s390/include/asm/topology.h
arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm_para.h
arch/s390/include/uapi/asm/kvm_perf.h
arch/s390/include/uapi/asm/virtio-ccw.h
arch/s390/include/uapi/asm/zcrypt.h
arch/s390/kernel/debug.c
arch/s390/kernel/dis.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/entry.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/lgr.c
arch/s390/kernel/module.c
arch/s390/kernel/nmi.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/sthyi.c
arch/s390/kernel/time.c
arch/s390/kernel/topology.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vdso32/clock_getres.S
arch/s390/kernel/vdso32/clock_gettime.S
arch/s390/kernel/vdso32/gettimeofday.S
arch/s390/kernel/vdso64/clock_getres.S
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/kernel/vdso64/gettimeofday.S
arch/s390/kernel/vtime.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/cmm.c
arch/s390/mm/gmap.c
arch/s390/mm/mmap.c
arch/s390/mm/pgtable.c
arch/s390/pci/pci.c
arch/s390/pci/pci_debug.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_insn.c
arch/sh/Makefile
arch/sh/boot/compressed/.gitignore
arch/sh/boot/compressed/misc.c
arch/sh/drivers/heartbeat.c
arch/sh/drivers/pci/common.c
arch/sh/drivers/push-switch.c
arch/sh/include/asm/topology.h
arch/sparc/Kbuild
arch/sparc/Kconfig
arch/sparc/Makefile
arch/sparc/include/asm/bitops_64.h
arch/sparc/include/asm/clocksource.h [new file with mode: 0644]
arch/sparc/include/asm/cmpxchg_32.h
arch/sparc/include/asm/elf_64.h
arch/sparc/include/asm/mmu_64.h
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/processor_64.h
arch/sparc/include/asm/topology_64.h
arch/sparc/include/asm/tsb.h
arch/sparc/include/asm/vdso.h [new file with mode: 0644]
arch/sparc/include/asm/vvar.h [new file with mode: 0644]
arch/sparc/include/uapi/asm/auxvec.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/head_64.S
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/time_64.c
arch/sparc/kernel/vdso.c [new file with mode: 0644]
arch/sparc/kernel/viohs.c
arch/sparc/lib/Makefile
arch/sparc/lib/NG4fls.S [new file with mode: 0644]
arch/sparc/lib/NG4patch.S
arch/sparc/lib/atomic32.c
arch/sparc/lib/fls.S [new file with mode: 0644]
arch/sparc/lib/fls64.S [new file with mode: 0644]
arch/sparc/mm/gup.c
arch/sparc/vdso/.gitignore [new file with mode: 0644]
arch/sparc/vdso/Makefile [new file with mode: 0644]
arch/sparc/vdso/vclock_gettime.c [new file with mode: 0644]
arch/sparc/vdso/vdso-layout.lds.S [new file with mode: 0644]
arch/sparc/vdso/vdso-note.S [new file with mode: 0644]
arch/sparc/vdso/vdso.lds.S [new file with mode: 0644]
arch/sparc/vdso/vdso2c.c [new file with mode: 0644]
arch/sparc/vdso/vdso2c.h [new file with mode: 0644]
arch/sparc/vdso/vdso32/.gitignore [new file with mode: 0644]
arch/sparc/vdso/vdso32/vclock_gettime.c [new file with mode: 0644]
arch/sparc/vdso/vdso32/vdso-note.S [new file with mode: 0644]
arch/sparc/vdso/vdso32/vdso32.lds.S [new file with mode: 0644]
arch/sparc/vdso/vma.c [new file with mode: 0644]
arch/tile/include/asm/pgtable.h
arch/tile/include/asm/topology.h
arch/um/Kconfig.common
arch/x86/Kconfig
arch/x86/boot/compressed/kaslr.c
arch/x86/entry/entry_64.S
arch/x86/entry/vdso/Makefile
arch/x86/events/intel/core.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/elf.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/hypertransport.h [deleted file]
arch/x86/include/asm/insn-eval.h
arch/x86/include/asm/io.h
arch/x86/include/asm/irqdomain.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/processor.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/Makefile
arch/x86/kernel/apic/htirq.c [deleted file]
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/sys_x86_64.c
arch/x86/kernel/umip.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/emulate.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/insn-eval.c
arch/x86/lib/x86-opcode-map.txt
arch/x86/mm/hugetlbpage.c
arch/x86/mm/mmap.c
block/blk-core.c
block/blk-stat.c
block/blk-throttle.c
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/pkcs7_key_type.c
crypto/asymmetric_keys/pkcs7_parser.c
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/x509_public_key.c
crypto/skcipher.c
drivers/atm/ambassador.c
drivers/atm/firestream.c
drivers/atm/fore200e.c
drivers/atm/horizon.c
drivers/atm/idt77105.c
drivers/atm/idt77252.c
drivers/atm/iphase.c
drivers/atm/lanai.c
drivers/atm/nicstar.c
drivers/atm/suni.c
drivers/auxdisplay/Kconfig
drivers/base/power/wakeup.c
drivers/block/DAC960.c
drivers/block/DAC960.h
drivers/block/aoe/aoecmd.c
drivers/block/ataflop.c
drivers/block/rbd.c
drivers/block/rsxx/cregs.c
drivers/block/rsxx/dma.c
drivers/block/skd_main.c
drivers/block/sunvdc.c
drivers/block/swim3.c
drivers/block/umem.c
drivers/block/xsysace.c
drivers/char/dtlk.c
drivers/char/hangcheck-timer.c
drivers/char/ipmi/bt-bmc.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/mem.c
drivers/char/nwbutton.c
drivers/char/nwbutton.h
drivers/char/rtc.c
drivers/char/tpm/tpm-dev-common.c
drivers/clk/at91/clk-utmi.c
drivers/clk/bcm/clk-kona-setup.c
drivers/clk/clk-cdce925.c
drivers/clk/clk-gpio.c
drivers/clk/clk-hsdk-pll.c
drivers/clk/clk-mux.c
drivers/clk/clk-stm32h7.c
drivers/clk/clk-twl6040.c
drivers/clk/clk-u300.c
drivers/clk/clk-wm831x.c
drivers/clk/clk-xgene.c
drivers/clk/clk.c
drivers/clk/hisilicon/clk-hi3620.c
drivers/clk/hisilicon/clk-hi3660.c
drivers/clk/hisilicon/clk-hi6220.c
drivers/clk/hisilicon/clk-hix5hd2.c
drivers/clk/hisilicon/clkgate-separated.c
drivers/clk/hisilicon/crg-hi3798cv200.c
drivers/clk/imx/clk-busy.c
drivers/clk/imx/clk-gate2.c
drivers/clk/imx/clk-imx6q.c
drivers/clk/imx/clk-imx6ul.c
drivers/clk/imx/clk-imx7d.c
drivers/clk/imx/clk-pllv1.c
drivers/clk/imx/clk-pllv2.c
drivers/clk/mediatek/Kconfig
drivers/clk/mediatek/Makefile
drivers/clk/mediatek/clk-mt2701.c
drivers/clk/mediatek/clk-mt2712-bdp.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt2712-img.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt2712-jpgdec.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt2712-mfg.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt2712-mm.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt2712-vdec.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt2712-venc.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt2712.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt7622-aud.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt7622-eth.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt7622-hif.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mt7622.c [new file with mode: 0644]
drivers/clk/mediatek/clk-mtk.h
drivers/clk/mediatek/clk-pll.c
drivers/clk/meson/gxbb.c
drivers/clk/meson/gxbb.h
drivers/clk/mmp/clk-apbc.c
drivers/clk/mmp/clk-apmu.c
drivers/clk/mmp/clk-frac.c
drivers/clk/mmp/clk-gate.c
drivers/clk/mmp/clk-mix.c
drivers/clk/mmp/clk-mmp2.c
drivers/clk/mmp/clk-pxa168.c
drivers/clk/mmp/clk-pxa910.c
drivers/clk/mxs/clk-div.c
drivers/clk/mxs/clk-frac.c
drivers/clk/pxa/clk-pxa.c
drivers/clk/qcom/clk-rcg.h
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-rpm.c
drivers/clk/qcom/clk-smd-rpm.c
drivers/clk/qcom/common.c
drivers/clk/renesas/Kconfig
drivers/clk/renesas/Makefile
drivers/clk/renesas/clk-div6.c
drivers/clk/renesas/clk-div6.h
drivers/clk/renesas/clk-mstp.c
drivers/clk/renesas/clk-rcar-gen2.c
drivers/clk/renesas/clk-rz.c
drivers/clk/renesas/r8a7745-cpg-mssr.c
drivers/clk/renesas/r8a7795-cpg-mssr.c
drivers/clk/renesas/r8a7796-cpg-mssr.c
drivers/clk/renesas/r8a77970-cpg-mssr.c [new file with mode: 0644]
drivers/clk/renesas/r8a77995-cpg-mssr.c
drivers/clk/renesas/rcar-gen2-cpg.c
drivers/clk/renesas/rcar-gen2-cpg.h
drivers/clk/renesas/rcar-gen3-cpg.c
drivers/clk/renesas/rcar-gen3-cpg.h
drivers/clk/renesas/renesas-cpg-mssr.c
drivers/clk/renesas/renesas-cpg-mssr.h
drivers/clk/rockchip/clk-cpu.c
drivers/clk/rockchip/clk-rk3188.c
drivers/clk/rockchip/clk-rk3368.c
drivers/clk/samsung/Makefile
drivers/clk/samsung/clk-cpu.c
drivers/clk/samsung/clk-exynos-audss.c
drivers/clk/samsung/clk-exynos-clkout.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos4412-isp.c [new file with mode: 0644]
drivers/clk/samsung/clk-exynos5250.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/samsung/clk-exynos5433.c
drivers/clk/samsung/clk-exynos5440.c
drivers/clk/samsung/clk-pll.c
drivers/clk/samsung/clk-s3c2443.c
drivers/clk/samsung/clk.c
drivers/clk/samsung/clk.h
drivers/clk/sirf/clk-atlas6.c
drivers/clk/sirf/clk-atlas7.c
drivers/clk/sirf/clk-common.c
drivers/clk/sirf/clk-prima2.c
drivers/clk/spear/clk-aux-synth.c
drivers/clk/spear/clk-frac-synth.c
drivers/clk/spear/clk-gpt-synth.c
drivers/clk/spear/clk-vco-pll.c
drivers/clk/spear/clk.h
drivers/clk/spear/spear1310_clock.c
drivers/clk/spear/spear1340_clock.c
drivers/clk/sunxi-ng/Makefile
drivers/clk/sunxi-ng/ccu-sun4i-a10.c
drivers/clk/sunxi-ng/ccu-sun4i-a10.h
drivers/clk/sunxi-ng/ccu-sun5i.c
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
drivers/clk/sunxi-ng/ccu-sun6i-a31.h
drivers/clk/sunxi-ng/ccu-sun8i-a23.c
drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
drivers/clk/sunxi-ng/ccu-sun8i-de2.c
drivers/clk/sunxi-ng/ccu-sun8i-h3.c
drivers/clk/sunxi-ng/ccu_common.h
drivers/clk/sunxi-ng/ccu_nm.c
drivers/clk/sunxi-ng/ccu_nm.h
drivers/clk/sunxi-ng/ccu_reset.c
drivers/clk/sunxi-ng/ccu_sdm.c [new file with mode: 0644]
drivers/clk/sunxi-ng/ccu_sdm.h [new file with mode: 0644]
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-sun9i-mmc.c
drivers/clk/tegra/clk-bpmp.c
drivers/clk/tegra/clk-dfll.c
drivers/clk/tegra/clk-dfll.h
drivers/clk/tegra/clk-id.h
drivers/clk/tegra/clk-periph.c
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra-super-gen4.c
drivers/clk/tegra/clk-tegra114.c
drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
drivers/clk/tegra/clk-tegra20.c
drivers/clk/tegra/clk-tegra210.c
drivers/clk/tegra/clk-tegra30.c
drivers/clk/tegra/clk.h
drivers/clk/ti/clk-dra7-atl.c
drivers/clk/ti/divider.c
drivers/clk/ti/mux.c
drivers/clk/uniphier/clk-uniphier-mio.c
drivers/clk/uniphier/clk-uniphier-sys.c
drivers/clk/ux500/clk-prcc.c
drivers/clk/ux500/clk-prcmu.c
drivers/clk/ux500/clk-sysctrl.c
drivers/clk/versatile/clk-icst.c
drivers/clocksource/timer-of.c
drivers/clocksource/timer-of.h
drivers/cpufreq/Kconfig
drivers/cpufreq/mediatek-cpufreq.c
drivers/dax/device.c
drivers/dma-buf/dma-fence.c
drivers/firmware/psci_checker.c
drivers/firmware/qcom_scm-32.c
drivers/firmware/qcom_scm-64.c
drivers/firmware/qcom_scm.c
drivers/firmware/qcom_scm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_vblank.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/selftests/lib_sw_fence.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_psr.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tilcdc/Kconfig
drivers/gpu/drm/tilcdc/Makefile
drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c [deleted file]
drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts [deleted file]
drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h [deleted file]
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_irq.c
drivers/gpu/drm/vgem/vgem_fence.c
drivers/gpu/drm/via/via_dmablit.c
drivers/gpu/ipu-v3/ipu-dc.c
drivers/hid/hid-appleir.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-wiimote-core.c
drivers/hwmon/k10temp.c
drivers/hwmon/w83781d.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83793.c
drivers/hwspinlock/Kconfig
drivers/ide/ide-pnp.c
drivers/iio/common/ssp_sensors/ssp_dev.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mthca/mthca_catas.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/input/gameport/gameport.c
drivers/input/input.c
drivers/input/joystick/db9.c
drivers/input/joystick/gamecon.c
drivers/input/joystick/turbografx.c
drivers/input/touchscreen/s3c2410_ts.c
drivers/iommu/iova.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic-v4.c
drivers/irqchip/irq-imgpdc.c
drivers/irqchip/irq-s3c24xx.c
drivers/irqchip/irq-sni-exiu.c
drivers/irqchip/qcom-irq-combiner.c
drivers/isdn/capi/capidrv.c
drivers/isdn/divert/isdn_divert.c
drivers/isdn/hardware/eicon/divasi.c
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/hardware/mISDN/mISDNisar.c
drivers/isdn/hisax/asuscom.c
drivers/isdn/hisax/avm_pci.c
drivers/isdn/hisax/diva.c
drivers/isdn/hisax/elsa.c
drivers/isdn/hisax/hfc_sx.c
drivers/isdn/hisax/hfcscard.c
drivers/isdn/hisax/hisax_fcpcipnp.c
drivers/isdn/hisax/isurf.c
drivers/isdn/hisax/ix1_micro.c
drivers/isdn/hisax/niccy.c
drivers/isdn/hisax/sedlbauer.c
drivers/isdn/hisax/teles3.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/i4l/isdn_net.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/i4l/isdn_tty.c
drivers/lightnvm/pblk-core.c
drivers/lightnvm/pblk-gc.c
drivers/lightnvm/pblk-init.c
drivers/lightnvm/pblk-rl.c
drivers/lightnvm/pblk.h
drivers/lightnvm/rrpc.c
drivers/media/common/saa7146/saa7146_vbi.c
drivers/media/platform/fsl-viu.c
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
drivers/media/platform/vim2m.c
drivers/media/usb/au0828/au0828-dvb.c
drivers/media/usb/au0828/au0828-video.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/memstick/core/ms_block.c
drivers/mfd/rtsx_usb.c
drivers/misc/lkdtm_bugs.c
drivers/mmc/core/host.c
drivers/mtd/Kconfig
drivers/mtd/chips/map_ram.c
drivers/mtd/chips/map_rom.c
drivers/mtd/devices/docg3.c
drivers/mtd/devices/lart.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/mtdram.c
drivers/mtd/devices/slram.c
drivers/mtd/maps/cfi_flagadm.c
drivers/mtd/maps/impa7.c
drivers/mtd/maps/netsc520.c
drivers/mtd/maps/nettel.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/sbc_gxx.c
drivers/mtd/maps/ts5500_flash.c
drivers/mtd/maps/uclinux.c
drivers/mtd/mtdconcat.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdpart.c
drivers/mtd/mtdsuper.c
drivers/mtd/mtdswap.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/Makefile
drivers/mtd/nand/ams-delta.c
drivers/mtd/nand/atmel/nand-controller.c
drivers/mtd/nand/atmel/pmecc.c
drivers/mtd/nand/atmel/pmecc.h
drivers/mtd/nand/au1550nd.c
drivers/mtd/nand/cmx270_nand.c
drivers/mtd/nand/denali.c
drivers/mtd/nand/denali.h
drivers/mtd/nand/denali_dt.c
drivers/mtd/nand/denali_pci.c
drivers/mtd/nand/diskonchip.c
drivers/mtd/nand/gpio.c
drivers/mtd/nand/hisi504_nand.c
drivers/mtd/nand/mtk_ecc.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/nuc900_nand.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/qcom_nandc.c
drivers/mtd/nand/sh_flctl.c
drivers/mtd/parsers/Kconfig
drivers/mtd/parsers/Makefile
drivers/mtd/parsers/sharpslpart.c [new file with mode: 0644]
drivers/mtd/sm_ftl.c
drivers/mtd/spi-nor/Kconfig
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/mtd/spi-nor/intel-spi-pci.c
drivers/mtd/spi-nor/intel-spi.c
drivers/mtd/spi-nor/mtk-quadspi.c
drivers/mtd/spi-nor/spi-nor.c
drivers/mtd/spi-nor/stm32-quadspi.c
drivers/net/bonding/bond_netlink.c
drivers/net/caif/caif_hsi.c
drivers/net/cris/eth_v10.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mv88e6xxx/phy.c
drivers/net/eql.c
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cisco/enic/enic_clsf.c
drivers/net/ethernet/cisco/enic/enic_clsf.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40evf_client.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/netronome/nfp/bpf/offload.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/nfp_app.h
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/netronome/nfp/nfp_port.h
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/Kconfig
drivers/net/geneve.c
drivers/net/hamradio/scc.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macsec.c
drivers/net/phy/cortina.c
drivers/net/phy/marvell10g.c
drivers/net/slip/slip.c
drivers/net/tap.c
drivers/net/thunderbolt.c
drivers/net/tun.c
drivers/net/usb/hso.c
drivers/net/usb/ipheth.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net. [deleted file]
drivers/net/vxlan.c
drivers/net/wan/hdlc_ppp.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath9k/channel.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/atmel/at76c50x-usb.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/cfg/a000.c
drivers/net/wireless/intel/iwlwifi/dvm/main.c
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/hostap/hostap_ap.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/intersil/orinoco/orinoco_usb.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
drivers/net/wireless/quantenna/qtnfmac/core.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rsi/rsi_91x_usb.c
drivers/net/wireless/st/cw1200/sta.c
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/xen-netfront.c
drivers/nfc/nfcmrvl/fw_dnld.c
drivers/nfc/pn533/pn533.c
drivers/nfc/st-nci/ndlc.c
drivers/nfc/st-nci/se.c
drivers/nfc/st21nfca/se.c
drivers/ntb/hw/Kconfig
drivers/ntb/hw/Makefile
drivers/ntb/hw/idt/ntb_hw_idt.c
drivers/ntb/hw/intel/ntb_hw_intel.c
drivers/ntb/hw/mscc/Kconfig [new file with mode: 0644]
drivers/ntb/hw/mscc/Makefile [new file with mode: 0644]
drivers/ntb/hw/mscc/ntb_hw_switchtec.c [new file with mode: 0644]
drivers/ntb/ntb_transport.c
drivers/ntb/test/ntb_perf.c
drivers/ntb/test/ntb_pingpong.c
drivers/ntb/test/ntb_tool.c
drivers/of/base.c
drivers/of/of_pci.c
drivers/of/unittest-data/Makefile
drivers/of/unittest-data/testcases.dts
drivers/pci/Kconfig
drivers/pci/Makefile
drivers/pci/htirq.c [deleted file]
drivers/pci/switch/switchtec.c
drivers/pcmcia/sa1111_badge4.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/dell-smbios-smm.c [new file with mode: 0644]
drivers/platform/x86/dell-smbios-wmi.c [new file with mode: 0644]
drivers/platform/x86/dell-smbios.c
drivers/platform/x86/dell-smbios.h
drivers/platform/x86/dell-smo8800.c
drivers/platform/x86/dell-wmi-descriptor.c [new file with mode: 0644]
drivers/platform/x86/dell-wmi-descriptor.h [new file with mode: 0644]
drivers/platform/x86/dell-wmi.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-wmi-thunderbolt.c [new file with mode: 0644]
drivers/platform/x86/intel_cht_int33fe.c
drivers/platform/x86/intel_ips.c
drivers/platform/x86/intel_ips.h
drivers/platform/x86/intel_punit_ipc.c
drivers/platform/x86/intel_telemetry_core.c
drivers/platform/x86/intel_telemetry_debugfs.c
drivers/platform/x86/intel_telemetry_pltdrv.c
drivers/platform/x86/intel_turbo_max_3.c
drivers/platform/x86/mlx-platform.c
drivers/platform/x86/peaq-wmi.c
drivers/platform/x86/silead_dmi.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/wmi.c
drivers/pps/clients/pps-ktimer.c
drivers/pwm/pwm-atmel-tcb.c
drivers/pwm/pwm-img.c
drivers/pwm/pwm-mediatek.c
drivers/pwm/pwm-stm32-lp.c
drivers/pwm/pwm-sun4i.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/rapidio/switches/idt_gen2.c
drivers/rapidio/switches/idt_gen3.c
drivers/rapidio/switches/idtcps.c
drivers/rapidio/switches/tsi568.c
drivers/rapidio/switches/tsi57x.c
drivers/remoteproc/Kconfig
drivers/remoteproc/qcom_q6v5_pil.c
drivers/remoteproc/remoteproc_debugfs.c
drivers/rpmsg/Kconfig
drivers/rpmsg/qcom_glink_native.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/interface.c
drivers/rtc/rtc-abx80x.c
drivers/rtc/rtc-armada38x.c
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-dev.c
drivers/rtc/rtc-ds1305.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-ds1390.c
drivers/rtc/rtc-ds1511.c
drivers/rtc/rtc-jz4740.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-m48t86.c
drivers/rtc/rtc-mt7622.c [new file with mode: 0644]
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-pcf8523.c
drivers/rtc/rtc-pcf85363.c [new file with mode: 0644]
drivers/rtc/rtc-pcf8563.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-rv3029c2.c
drivers/rtc/rtc-rx8010.c
drivers/rtc/rtc-sc27xx.c [new file with mode: 0644]
drivers/rtc/rtc-sysfs.c
drivers/rtc/rtc-xgene.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/block/xpram.c
drivers/s390/char/fs3270.c
drivers/s390/char/hmcdrv_mod.c
drivers/s390/char/monreader.c
drivers/s390/char/monwriter.c
drivers/s390/char/raw3270.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp_async.c
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/char/tape_class.c
drivers/s390/char/tape_core.c
drivers/s390/char/tty3270.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/vmur.c
drivers/s390/char/zcore.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/chp.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/cio.c
drivers/s390/cio/cmf.c
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/isc.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/cio/scm.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_api.h
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_cca_key.h
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2a.h
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_error.h
drivers/s390/crypto/zcrypt_msgtype50.c
drivers/s390/crypto/zcrypt_msgtype50.h
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/s390/crypto/zcrypt_msgtype6.h
drivers/s390/crypto/zcrypt_pcixcc.c
drivers/s390/crypto/zcrypt_pcixcc.h
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/fsm.c
drivers/s390/net/lcs.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/smsgiucv.c
drivers/s390/net/smsgiucv_app.c
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/virtio/Makefile
drivers/s390/virtio/virtio_ccw.c
drivers/sbus/char/display7seg.c
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/aic94xx/aic94xx_tmf.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/arm/fas216.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/ipr.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/ncr53c8xx.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pmcraid.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/staging/greybus/operation.c
drivers/staging/irda/include/net/irda/timer.h
drivers/staging/lustre/lnet/lnet/net_fault.c
drivers/staging/lustre/lustre/llite/file.c
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/staging/lustre/lustre/ptlrpc/service.c
drivers/staging/media/imx/imx-ic-prpencvf.c
drivers/staging/media/imx/imx-media-csi.c
drivers/staging/most/hdm-usb/hdm_usb.c
drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
drivers/staging/rtl8712/recv_linux.c
drivers/staging/rtl8712/rtl8712_led.c
drivers/staging/speakup/main.c
drivers/staging/speakup/synth.c
drivers/staging/unisys/visorbus/visorbus_main.c
drivers/staging/unisys/visornic/visornic_main.c
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
drivers/target/iscsi/cxgbit/cxgbit.h
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
drivers/target/iscsi/cxgbit/cxgbit_main.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_alua.c
drivers/target/target_core_alua.h
drivers/target/target_core_configfs.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_file.c
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/tty/cyclades.c
drivers/tty/ipwireless/hardware.c
drivers/tty/isicom.c
drivers/tty/moxa.c
drivers/tty/n_gsm.c
drivers/tty/n_r3964.c
drivers/tty/rocket.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/crisv10.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/ifx6x60.c
drivers/tty/serial/imx.c
drivers/tty/serial/kgdb_nmi.c
drivers/tty/serial/max3100.c
drivers/tty/serial/mux.c
drivers/tty/serial/pnx8xxx_uart.c
drivers/tty/serial/sa1100.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sn_console.c
drivers/tty/synclink.c
drivers/tty/synclink_gt.c
drivers/tty/synclinkmp.c
drivers/tty/vt/keyboard.c
drivers/tty/vt/vt.c
drivers/usb/atm/cxacru.c
drivers/usb/atm/speedtch.c
drivers/usb/atm/usbatm.c
drivers/usb/core/hcd.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/gadget/udc/at91_udc.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/m66592-udc.c
drivers/usb/gadget/udc/omap_udc.c
drivers/usb/gadget/udc/pxa25x_udc.c
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/oxu210hp-hcd.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/sl811-hcd.c
drivers/usb/host/uhci-hcd.c
drivers/usb/host/uhci-q.c
drivers/usb/host/xhci.c
drivers/usb/serial/mos7840.c
drivers/usb/storage/realtek_cr.c
drivers/uwb/drp.c
drivers/uwb/neh.c
drivers/uwb/rsv.c
drivers/uwb/uwb-internal.h
drivers/video/fbdev/Kconfig
drivers/video/fbdev/Makefile
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/radeon_base.c
drivers/video/fbdev/aty/radeon_pm.c
drivers/video/fbdev/au1200fb.c
drivers/video/fbdev/cirrusfb.c
drivers/video/fbdev/controlfb.h
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbcon.h
drivers/video/fbdev/dnfb.c
drivers/video/fbdev/goldfishfb.c
drivers/video/fbdev/igafb.c [deleted file]
drivers/video/fbdev/intelfb/intelfbhw.c
drivers/video/fbdev/matrox/matroxfb_base.c
drivers/video/fbdev/mxsfb.c
drivers/video/fbdev/omap/hwa742.c
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
drivers/video/fbdev/pxa3xx-gcu.c
drivers/video/fbdev/sa1100fb.c
drivers/video/fbdev/sa1100fb.h
drivers/video/fbdev/sis/init301.c
drivers/video/fbdev/sis/sis_main.c
drivers/video/fbdev/sm501fb.c
drivers/video/fbdev/udlfb.c
drivers/watchdog/alim7101_wdt.c
drivers/watchdog/at91sam9_wdt.c
drivers/watchdog/bcm47xx_wdt.c
drivers/watchdog/bcm63xx_wdt.c
drivers/watchdog/cpu5wdt.c
drivers/watchdog/machzwd.c
drivers/watchdog/mixcomwd.c
drivers/watchdog/mpc8xxx_wdt.c
drivers/watchdog/mtx-1_wdt.c
drivers/watchdog/nuc900_wdt.c
drivers/watchdog/pcwd.c
drivers/watchdog/pika_wdt.c
drivers/watchdog/rdc321x_wdt.c
drivers/watchdog/sbc60xxwdt.c
drivers/watchdog/sc520_wdt.c
drivers/watchdog/shwdt.c
drivers/watchdog/via_wdt.c
drivers/watchdog/w83877f_wdt.c
drivers/watchdog/watchdog_core.c
drivers/watchdog/watchdog_dev.c
drivers/xen/grant-table.c
firmware/Makefile
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/adfs/super.c
fs/affs/amigaffs.c
fs/affs/bitmap.c
fs/affs/super.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/flock.c
fs/afs/internal.h
fs/afs/rotate.c
fs/afs/security.c
fs/afs/server_list.c
fs/afs/super.c
fs/afs/write.c
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/befs/ChangeLog
fs/befs/linuxvfs.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tree-checker.c
fs/btrfs/tree-checker.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/caps.c
fs/ceph/inode.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/ceph/super.h
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/inode.c
fs/cifs/xattr.c
fs/coda/inode.c
fs/cramfs/inode.c
fs/dax.c
fs/ecryptfs/main.c
fs/efs/super.c
fs/eventpoll.c
fs/exec.c
fs/ext2/balloc.c
fs/ext2/ialloc.c
fs/ext2/super.c
fs/ext4/inode.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/f2fs.h
fs/f2fs/gc.c
fs/f2fs/recovery.c
fs/f2fs/super.c
fs/fat/dir.c
fs/fat/fatent.c
fs/fat/inode.c
fs/fat/misc.c
fs/fat/namei_msdos.c
fs/freevxfs/vxfs_super.c
fs/fs-writeback.c
fs/fuse/inode.c
fs/gfs2/ops_fstype.c
fs/gfs2/super.c
fs/gfs2/trans.c
fs/hfs/bnode.c
fs/hfs/mdb.c
fs/hfs/super.c
fs/hfsplus/bnode.c
fs/hfsplus/super.c
fs/hpfs/map.c
fs/hpfs/super.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/isofs/inode.c
fs/jffs2/fs.c
fs/jffs2/os-linux.h
fs/jffs2/super.c
fs/jfs/super.c
fs/kernfs/mount.c
fs/libfs.c
fs/lockd/host.c
fs/lockd/mon.c
fs/lockd/svc.c
fs/lockd/svcsubs.c
fs/locks.c
fs/mbcache.c
fs/minix/inode.c
fs/namei.c
fs/ncpfs/inode.c
fs/nfs/dir.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/super.c
fs/nfs_common/grace.c
fs/nfsd/export.c
fs/nfsd/fault_inject.c
fs/nfsd/netns.h
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfssvc.c
fs/nfsd/state.h
fs/nfsd/xdr4.h
fs/nilfs2/namei.c
fs/nilfs2/segment.c
fs/nilfs2/segment.h
fs/nilfs2/sufile.c
fs/nilfs2/super.c
fs/nilfs2/the_nilfs.c
fs/nilfs2/the_nilfs.h
fs/notify/fsnotify.c
fs/nsfs.c
fs/ntfs/super.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/file.c
fs/ocfs2/super.c
fs/ocfs2/xattr.c
fs/openpromfs/inode.c
fs/orangefs/acl.c
fs/orangefs/dir.c
fs/orangefs/file.c
fs/orangefs/inode.c
fs/orangefs/namei.c
fs/orangefs/orangefs-debug.h
fs/orangefs/orangefs-kernel.h
fs/orangefs/orangefs-utils.c
fs/orangefs/super.c
fs/orangefs/symlink.c
fs/overlayfs/super.c
fs/pipe.c
fs/proc/Makefile
fs/proc/array.c
fs/proc/base.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/loadavg.c
fs/proc/root.c
fs/proc/util.c [new file with mode: 0644]
fs/proc_namespace.c
fs/pstore/platform.c
fs/qnx4/inode.c
fs/qnx6/inode.c
fs/quota/dquot.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/reiserfs/prints.c
fs/reiserfs/super.c
fs/reiserfs/xattr.c
fs/romfs/super.c
fs/squashfs/super.c
fs/statfs.c
fs/sysfs/mount.c
fs/sysv/inode.c
fs/sysv/super.c
fs/ubifs/file.c
fs/ubifs/io.c
fs/ubifs/super.c
fs/ubifs/ubifs.h
fs/udf/super.c
fs/ufs/balloc.c
fs/ufs/ialloc.c
fs/ufs/super.c
fs/xfs/libxfs/xfs_iext_tree.c
fs/xfs/libxfs/xfs_inode_fork.c
fs/xfs/xfs_linux.h
fs/xfs/xfs_log.c
fs/xfs/xfs_super.c
fs/xfs/xfs_super.h
include/asm-generic/bug.h
include/asm-generic/pgtable.h
include/asm-generic/sections.h
include/asm-generic/topology.h
include/asm-generic/vmlinux.lds.h
include/crypto/if_alg.h
include/drm/drm_connector.h
include/drm/drm_edid.h
include/drm/drm_mode_config.h
include/dt-bindings/clock/exynos4.h
include/dt-bindings/clock/gxbb-clkc.h
include/dt-bindings/clock/imx7d-clock.h
include/dt-bindings/clock/mt2712-clk.h [new file with mode: 0644]
include/dt-bindings/clock/mt7622-clk.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,rpmcc.h
include/dt-bindings/clock/r8a77970-cpg-mssr.h [new file with mode: 0644]
include/dt-bindings/clock/s3c2443.h
include/dt-bindings/clock/sun4i-a10-ccu.h
include/dt-bindings/clock/sun6i-a31-ccu.h
include/dt-bindings/msm/msm-bus-ids.h [deleted file]
include/kvm/arm_vgic.h
include/linux/bitfield.h
include/linux/bpf.h
include/linux/bpf_verifier.h
include/linux/bug.h
include/linux/clk-provider.h
include/linux/compiler-clang.h
include/linux/compiler.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/genalloc.h
include/linux/htirq.h [deleted file]
include/linux/hugetlb.h
include/linux/init.h
include/linux/init_task.h
include/linux/iopoll.h
include/linux/ipc_namespace.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v4.h
include/linux/kallsyms.h
include/linux/kcov.h
include/linux/kernel.h
include/linux/key-type.h
include/linux/key.h
include/linux/kthread.h
include/linux/kvm_host.h
include/linux/migrate.h
include/linux/miscdevice.h
include/linux/mm.h
include/linux/mtd/mtd.h
include/linux/mtd/nand-gpio.h
include/linux/mtd/rawnand.h
include/linux/mtd/spi-nor.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/nodemask.h
include/linux/ntb.h
include/linux/pageblock-flags.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/pid.h
include/linux/pid_namespace.h
include/linux/pipe_fs_i.h
include/linux/platform_data/mtd-nand-omap2.h
include/linux/printk.h
include/linux/qcom_scm.h
include/linux/radix-tree.h
include/linux/reboot.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/soc/qcom/smd-rpm.h
include/linux/sunrpc/cache.h
include/linux/sunrpc/svc.h
include/linux/switchtec.h [new file with mode: 0644]
include/linux/sysctl.h
include/linux/timekeeper_internal.h
include/linux/timekeeping.h
include/linux/timer.h
include/linux/trace_events.h
include/linux/virtio_net.h
include/linux/wmi.h
include/linux/workqueue.h
include/linux/writeback.h
include/net/genetlink.h
include/net/ipv6.h
include/net/mac80211.h
include/net/sctp/checksum.h
include/net/sctp/sctp.h
include/net/sctp/stream_sched.h
include/net/tcp.h
include/scsi/scsi_device.h
include/scsi/scsi_devinfo.h
include/soc/at91/atmel-sfr.h
include/sound/control.h
include/target/target_core_base.h
include/trace/events/dma_fence.h
include/trace/events/preemptirq.h [new file with mode: 0644]
include/trace/events/rxrpc.h
include/trace/events/sched.h
include/trace/events/sunrpc.h
include/trace/events/thermal.h
include/trace/events/vmscan.h
include/trace/events/xen.h
include/uapi/linux/bfs_fs.h
include/uapi/linux/bpf.h
include/uapi/linux/kcov.h
include/uapi/linux/rxrpc.h
include/uapi/linux/seg6.h
include/uapi/linux/vm_sockets_diag.h
include/uapi/linux/wmi.h [new file with mode: 0644]
include/video/iga.h [deleted file]
init/Kconfig
init/initramfs.c
init/main.c
init/version.c
ipc/mqueue.c
ipc/sem.c
ipc/shm.c
ipc/util.c
ipc/util.h
kernel/bpf/offload.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/crash_core.c
kernel/events/core.c
kernel/fork.c
kernel/irq/manage.c
kernel/irq/matrix.c
kernel/irq/spurious.c
kernel/jump_label.c
kernel/kallsyms.c
kernel/kcov.c
kernel/kthread.c
kernel/module.c
kernel/padata.c
kernel/panic.c
kernel/pid.c
kernel/pid_namespace.c
kernel/printk/printk.c
kernel/printk/printk_safe.c
kernel/reboot.c
kernel/signal.c
kernel/sysctl.c
kernel/time/Kconfig
kernel/time/clocksource.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/time/timer_list.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/trace/trace_selftest.c
kernel/trace/trace_syscalls.c
kernel/trace/trace_uprobe.c
kernel/trace/tracing_map.c
kernel/trace/tracing_map.h
kernel/umh.c
kernel/workqueue.c
lib/Kconfig
lib/Kconfig.debug
lib/Makefile
lib/bug.c
lib/dma-debug.c
lib/dynamic_debug.c
lib/genalloc.c
lib/int_sqrt.c
lib/interval_tree_test.c
lib/nmi_backtrace.c
lib/random32.c
lib/rbtree_test.c
lib/string.c
lib/test_find_bit.c [new file with mode: 0644]
lib/test_kasan.c
lib/test_kmod.c
lib/test_list_sort.c
lib/test_printf.c
lib/test_string.c [new file with mode: 0644]
lib/vsprintf.c
mm/Kconfig
mm/Makefile
mm/compaction.c
mm/frame_vector.c
mm/gup.c
mm/gup_benchmark.c [new file with mode: 0644]
mm/hmm.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/kasan/report.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/shmem.c
mm/z3fold.c
net/802/garp.c
net/802/mrp.c
net/9p/client.c
net/9p/trans_fd.c
net/9p/trans_virtio.c
net/9p/trans_xen.c
net/appletalk/aarp.c
net/appletalk/ddp.c
net/atm/lec.c
net/atm/mpc.c
net/batman-adv/tp_meter.c
net/bluetooth/hidp/core.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/can/proc.c
net/ceph/ceph_hash.c
net/ceph/crypto.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/core/dev.c
net/core/drop_monitor.c
net/core/filter.c
net/core/gen_estimator.c
net/core/neighbour.c
net/decnet/dn_route.c
net/decnet/dn_timer.c
net/dsa/dsa2.c
net/ipv4/af_inet.c
net/ipv4/igmp.c
net/ipv4/ipmr.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/output_core.c
net/ipv6/route.c
net/ipv6/udp_offload.c
net/lapb/lapb_timer.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/led.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mlme.c
net/mac80211/ocb.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/ncsi/ncsi-manage.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nfnetlink_log.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_LED.c
net/netlabel/netlabel_addrlist.h
net/netrom/af_netrom.c
net/netrom/nr_loopback.c
net/netrom/nr_timer.c
net/nfc/nci/core.c
net/nfc/netlink.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/packet/internal.h
net/rose/rose_link.c
net/rose/rose_timer.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/misc.c
net/rxrpc/net_ns.c
net/rxrpc/output.c
net/rxrpc/recvmsg.c
net/rxrpc/sendmsg.c
net/rxrpc/sysctl.c
net/sched/act_csum.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/sch_cbq.c
net/sched/sch_sfq.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/sctp/stream.c
net/sctp/stream_sched.c
net/sctp/stream_sched_prio.c
net/sctp/stream_sched_rr.c
net/smc/smc_core.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/cache.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth_unix.c
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/tipc/group.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/node.c
net/vmw_vsock/vmci_transport.c
net/wireless/Kconfig
net/wireless/lib80211.c
net/wireless/nl80211.c
net/wireless/reg.c
net/x25/af_x25.c
net/x25/x25_link.c
net/x25/x25_timer.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
samples/bpf/Makefile
samples/hidraw/Makefile
samples/seccomp/Makefile
samples/sockmap/Makefile
samples/statx/Makefile
samples/uhid/Makefile
scripts/Kbuild.include
scripts/Makefile.asm-generic
scripts/Makefile.build
scripts/Makefile.headersinst
scripts/Makefile.help [deleted file]
scripts/Makefile.host
scripts/Makefile.kcov [new file with mode: 0644]
scripts/Makefile.lib
scripts/Makefile.modpost
scripts/bloat-o-meter
scripts/checkpatch.pl
scripts/coccicheck
scripts/coccinelle/api/check_bq27xxx_data.cocci [new file with mode: 0644]
scripts/coccinelle/api/setup_timer.cocci [deleted file]
scripts/coccinelle/iterators/list_entry_update.cocci
scripts/coccinelle/misc/ifcol.cocci
scripts/coccinelle/misc/orplus.cocci
scripts/coccinelle/null/badzero.cocci
scripts/faddr2line
scripts/get_maintainer.pl
scripts/kconfig/symbol.c
scripts/kernel-doc
scripts/link-vmlinux.sh
scripts/mkcompile_h
scripts/package/Makefile
scripts/package/builddeb
scripts/package/mkspec
scripts/parse-maintainers.pl
scripts/selinux/Makefile
scripts/spelling.txt
security/apparmor/apparmorfs.c
security/apparmor/domain.c
security/apparmor/file.c
security/apparmor/include/lib.h
security/apparmor/label.c
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/mount.c
security/apparmor/policy.c
security/apparmor/policy_ns.c
security/apparmor/policy_unpack.c
security/apparmor/resource.c
security/integrity/ima/ima_appraise.c
security/keys/gc.c
security/keys/internal.h
security/keys/key.c
security/keys/keyring.c
security/keys/permission.c
security/keys/proc.c
security/keys/process_keys.c
sound/core/pcm_lib.c
sound/core/timer_compat.c
sound/core/vmaster.c
sound/hda/hdmi_chmap.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/intel/Kconfig
sound/usb/clock.c
sound/usb/line6/driver.c
sound/usb/mixer.c
tools/Makefile
tools/bpf/bpftool/prog.c
tools/include/uapi/linux/bpf.h
tools/lib/traceevent/parse-filter.c
tools/objtool/.gitignore
tools/objtool/Makefile
tools/objtool/arch/x86/Build
tools/objtool/arch/x86/decode.c
tools/objtool/arch/x86/include/asm/inat.h [new file with mode: 0644]
tools/objtool/arch/x86/include/asm/inat_types.h [new file with mode: 0644]
tools/objtool/arch/x86/include/asm/insn.h [new file with mode: 0644]
tools/objtool/arch/x86/include/asm/orc_types.h [new file with mode: 0644]
tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk [deleted file]
tools/objtool/arch/x86/insn/inat.c [deleted file]
tools/objtool/arch/x86/insn/inat.h [deleted file]
tools/objtool/arch/x86/insn/inat_types.h [deleted file]
tools/objtool/arch/x86/insn/insn.c [deleted file]
tools/objtool/arch/x86/insn/insn.h [deleted file]
tools/objtool/arch/x86/insn/x86-opcode-map.txt [deleted file]
tools/objtool/arch/x86/lib/inat.c [new file with mode: 0644]
tools/objtool/arch/x86/lib/insn.c [new file with mode: 0644]
tools/objtool/arch/x86/lib/x86-opcode-map.txt [new file with mode: 0644]
tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk [new file with mode: 0644]
tools/objtool/orc.h
tools/objtool/orc_types.h [deleted file]
tools/objtool/sync-check.sh [new file with mode: 0755]
tools/power/cpupower/Makefile
tools/power/cpupower/bench/system.c
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
tools/scripts/Makefile.include
tools/testing/selftests/Makefile
tools/testing/selftests/android/Makefile [new file with mode: 0644]
tools/testing/selftests/android/ion/.gitignore [new file with mode: 0644]
tools/testing/selftests/android/ion/Makefile [new file with mode: 0644]
tools/testing/selftests/android/ion/README [new file with mode: 0644]
tools/testing/selftests/android/ion/config [new file with mode: 0644]
tools/testing/selftests/android/ion/ion.h [new file with mode: 0644]
tools/testing/selftests/android/ion/ion_test.sh [new file with mode: 0755]
tools/testing/selftests/android/ion/ionapp_export.c [new file with mode: 0644]
tools/testing/selftests/android/ion/ionapp_import.c [new file with mode: 0644]
tools/testing/selftests/android/ion/ionutils.c [new file with mode: 0644]
tools/testing/selftests/android/ion/ionutils.h [new file with mode: 0644]
tools/testing/selftests/android/ion/ipcsocket.c [new file with mode: 0644]
tools/testing/selftests/android/ion/ipcsocket.h [new file with mode: 0644]
tools/testing/selftests/android/run.sh [new file with mode: 0755]
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
tools/testing/selftests/cpu-hotplug/config
tools/testing/selftests/exec/execveat.c
tools/testing/selftests/firmware/fw_fallback.sh
tools/testing/selftests/firmware/fw_filesystem.sh
tools/testing/selftests/ftrace/config
tools/testing/selftests/ftrace/ftracetest
tools/testing/selftests/ftrace/test.d/00basic/basic4.tc
tools/testing/selftests/ftrace/test.d/event/event-enable.tc
tools/testing/selftests/ftrace/test.d/event/event-pid.tc
tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
tools/testing/selftests/ftrace/test.d/instances/instance.tc
tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc
tools/testing/selftests/ftrace/test.d/template
tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc
tools/testing/selftests/memfd/memfd_test.c
tools/testing/selftests/memory-hotplug/Makefile
tools/testing/selftests/seccomp/.gitignore
tools/testing/selftests/timers/.gitignore
tools/testing/selftests/vDSO/vdso_test.c
tools/testing/selftests/vm/.gitignore
tools/testing/selftests/vm/Makefile
tools/testing/selftests/vm/gup_benchmark.c [new file with mode: 0644]
tools/testing/selftests/x86/5lvl.c [new file with mode: 0644]
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/mpx-hw.h
tools/testing/selftests/x86/pkey-helpers.h
tools/testing/selftests/x86/protection_keys.c
tools/wmi/Makefile [new file with mode: 0644]
tools/wmi/dell-smbios-example.c [new file with mode: 0644]
virt/kvm/arm/arch_timer.c
virt/kvm/arm/arm.c
virt/kvm/arm/hyp/vgic-v3-sr.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio-v3.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic-v4.c [new file with mode: 0644]
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h
virt/kvm/kvm_main.c

index 6c119eab5d46921575fe606e9fe362ab5190791e..f6050b88e95b5b59e2c08dbef865d92049956e58 100644 (file)
@@ -55,6 +55,11 @@ modules.builtin
 /System.map
 /Module.markers
 
+#
+# RPM spec file (make rpm-pkg)
+#
+/*.spec
+
 #
 # Debian directory (make deb-pkg)
 #
diff --git a/Documentation/ABI/testing/dell-smbios-wmi b/Documentation/ABI/testing/dell-smbios-wmi
new file mode 100644 (file)
index 0000000..fc919ce
--- /dev/null
@@ -0,0 +1,41 @@
+What:          /dev/wmi/dell-smbios
+Date:          November 2017
+KernelVersion: 4.15
+Contact:       "Mario Limonciello" <mario.limonciello@dell.com>
+Description:
+               Perform SMBIOS calls on supported Dell machines
+               through the Dell ACPI-WMI interface.
+
+               IOCTLs and buffer formats are defined in:
+               <uapi/linux/wmi.h>
+
+               1) To perform an SMBIOS call from userspace, you'll need to
+               first determine the minimum size of the calling interface
+               buffer for your machine.
+               Platforms that contain larger buffers can return larger
+               objects from the system firmware.
+               Commonly this size is either 4k or 32k.
+
+               To determine the size of the buffer, read() a u64 from
+               the WMI character device /dev/wmi/dell-smbios.
+
+               2) After you've determined the minimum size of the calling
+               interface buffer, you can allocate a buffer of that size to
+               hold the calling interface structure defined in
+               <uapi/linux/wmi.h>.
+
+               3) In the 'length' object, store the size of the buffer you
+               determined and allocated above.
+
+               4) In this buffer object, prepare as necessary for the SMBIOS
+               call you're interested in.  Typically SMBIOS buffers have
+               "class", "select", and "input" set to values that correspond
+               to the data you are interested in.
+               Documenting class/select/input values is outside of the scope
+               of this documentation. Check with the libsmbios project for
+               further documentation on these values.
+
+               5) Run the call by using ioctl() as described in the header.
+
+               6) The output will be returned in the buffer object.
+
+               7) Be sure to free up your allocated object.
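
The numbered steps above boil down to a short userspace sequence. The sketch
below is only an illustration: the authoritative structure layout and ioctl
number live in <uapi/linux/wmi.h> (see also tools/wmi/dell-smbios-example.c
added by this merge), and the struct, field and macro names used here
(dell_wmi_smbios_buffer, std.cmd_class, std.cmd_select, std.output,
DELL_WMI_SMBIOS_CMD) as well as the class/select values are assumptions made
for illustration, not definitions taken from this document.

    /* Hedged sketch of the read-size / allocate / ioctl flow described above. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/wmi.h>      /* assumed to define the buffer struct and ioctl */

    int main(void)
    {
        struct dell_wmi_smbios_buffer *buf;
        uint64_t buf_size;
        int fd;

        fd = open("/dev/wmi/dell-smbios", O_RDWR | O_NONBLOCK);
        if (fd == -1)
            perror("open"), exit(1);

        /* Step 1: read the minimum calling interface buffer size as a u64. */
        if (read(fd, &buf_size, sizeof(buf_size)) != sizeof(buf_size))
            perror("read"), exit(1);

        /* Steps 2 and 3: allocate the buffer and record its size in 'length'. */
        buf = calloc(1, buf_size);
        if (!buf)
            perror("calloc"), exit(1);
        buf->length = buf_size;

        /* Step 4: class/select/input values come from libsmbios documentation;
         * the zeroes below are placeholders, not a meaningful call. */
        buf->std.cmd_class = 0;
        buf->std.cmd_select = 0;

        /* Steps 5 and 6: run the call; output comes back in the same buffer. */
        if (ioctl(fd, DELL_WMI_SMBIOS_CMD, buf))
            perror("ioctl");
        else
            printf("output[0] = %u\n", buf->std.output[0]);

        /* Step 7: free the allocated object. */
        free(buf);
        close(fd);
        return 0;
    }
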
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-smbios b/Documentation/ABI/testing/sysfs-platform-dell-smbios
new file mode 100644 (file)
index 0000000..205d3b6
--- /dev/null
@@ -0,0 +1,21 @@
+What:          /sys/devices/platform/<platform>/tokens/*
+Date:          November 2017
+KernelVersion: 4.15
+Contact:       "Mario Limonciello" <mario.limonciello@dell.com>
+Description:
+               A read-only description of Dell platform tokens
+               available on the machine.
+
+               Each token attribute is available as a pair of
+               sysfs attributes readable by a process with
+               CAP_SYS_ADMIN.
+
+               For example the token ID "5" would be available
+               as the following attributes:
+
+               0005_location
+               0005_value
+
+               Tokens will vary from machine to machine, and
+               only tokens available on that machine will be
+               displayed.
diff --git a/Documentation/ABI/testing/sysfs-platform-intel-wmi-thunderbolt b/Documentation/ABI/testing/sysfs-platform-intel-wmi-thunderbolt
new file mode 100644 (file)
index 0000000..8af6505
--- /dev/null
@@ -0,0 +1,11 @@
+What:          /sys/devices/platform/<platform>/force_power
+Date:          September 2017
+KernelVersion: 4.15
+Contact:       "Mario Limonciello" <mario.limonciello@dell.com>
+Description:
+               Modify the platform force power state, influencing
+               Thunderbolt controllers to turn on or off when no
+               devices are connected (write-only).
+               There are two available states:
+                   * 0 -> Force power disabled
+                   * 1 -> Force power enabled
index 12278a926370a136e978feb31c94fa8763e9cdea..fdf72429f8019cd68ab8e7557cdb4c7376e50176 100644 (file)
@@ -18,7 +18,7 @@ shortcut for ``print_hex_dump(KERN_DEBUG)``.
 
 For ``print_hex_dump_debug()``/``print_hex_dump_bytes()``, format string is
 its ``prefix_str`` argument, if it is constant string; or ``hexdump``
-in case ``prefix_str`` is build dynamically.
+in case ``prefix_str`` is built dynamically.
 
 Dynamic debug has even more useful features:
 
@@ -197,8 +197,8 @@ line
     line number matches the callsite line number exactly.  A
     range of line numbers matches any callsite between the first
     and last line number inclusive.  An empty first number means
-    the first line in the file, an empty line number means the
-    last number in the file.  Examples::
+    the first line in the file, an empty last line number means the
+    last line number in the file.  Examples::
 
        line 1603           // exactly line 1603
        line 1600-1605      // the six lines from line 1600 to line 1605
index 62436bd5f34a730b5e0c15e38138970605d9f8c0..6571fbfdb2a1527c25b3a01e9c4228c84adce639 100644 (file)
                        [KVM,ARM] Trap guest accesses to GICv3 common
                        system registers
 
+       kvm-arm.vgic_v4_enable=
+                       [KVM,ARM] Allow use of GICv4 for direct injection of
+                       LPIs.
+
        kvm-intel.ept=  [KVM,Intel] Disable extended page tables
                        (virtualized MMU) support on capable Intel chips.
                        Default is 1 (enabled)
                        instead using the legacy FADT method
 
        profile=        [KNL] Enable kernel profiling via /proc/profile
-                       Format: [schedule,]<number>
+                       Format: [<profiletype>,]<number>
+                       Param: <profiletype>: "schedule", "sleep", or "kvm"
+                               [defaults to kernel profiling]
                        Param: "schedule" - profile schedule points.
-                       Param: <number> - step/bucket size as a power of 2 for
-                               statistical time based profiling.
                        Param: "sleep" - profile D-state sleeping (millisecs).
                                Requires CONFIG_SCHEDSTATS
                        Param: "kvm" - profile VM exits.
+                       Param: <number> - step/bucket size as a power of 2 for
+                               statistical time based profiling.
 
        prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk
                        before loading.
index 5c62d11d77e86f1709591ba4f3b2abd0e5530a32..de50a8561774249351515662404e2a1f8328aba6 100644 (file)
@@ -221,3 +221,18 @@ The driver will create one virtual ethernet interface per Thunderbolt
 port which are named like ``thunderbolt0`` and so on. From this point
 you can either use standard userspace tools like ``ifconfig`` to
 configure the interface or let your GUI to handle it automatically.
+
+Forcing power
+-------------
+Many OEMs include a method that can be used to force the power of a
+Thunderbolt controller to an "On" state even if nothing is connected.
+If supported by your machine, this will be exposed by the WMI bus with
+a sysfs attribute called "force_power".
+
+For example, the intel-wmi-thunderbolt driver exposes this attribute in:
+  /sys/devices/platform/PNP0C14:00/wmi_bus/wmi_bus-PNP0C14:00/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power
+
+  To force the power to on, write 1 to this attribute file.
+  To disable force power, write 0 to this attribute file.
+
+Note: it's currently not possible to query the force power state of a platform.
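
Programmatically the same effect is a one-line sysfs write; a minimal sketch
follows (the PNP0C14/GUID portion of the path is machine-specific, so treat
the path below purely as the example already quoted above):

    /* Force the Thunderbolt controller power on via intel-wmi-thunderbolt. */
    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/devices/platform/PNP0C14:00/wmi_bus/wmi_bus-PNP0C14:00/"
            "86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power";
        FILE *f = fopen(path, "w");

        if (!f)
            return perror("fopen"), 1;
        fputs("1", f);          /* "1" = force power on, "0" = disable */
        return fclose(f) ? 1 : 0;
    }
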
diff --git a/Documentation/clearing-warn-once.txt b/Documentation/clearing-warn-once.txt
new file mode 100644 (file)
index 0000000..5b1f5d5
--- /dev/null
@@ -0,0 +1,7 @@
+
+WARN_ONCE / WARN_ON_ONCE only print a warning once.
+
+echo 1 > /sys/kernel/debug/clear_warn_once
+
+clears the state and allows the warnings to print once again.
+This can be useful after test suite runs to reproduce problems.
index 1062ddba62c7608bb96f4211e2a6a0863f8a47c3..2ac3f9f2984531dda8a28ac9daaf3aa3d0ea8985 100644 (file)
@@ -177,18 +177,14 @@ Here is a sample module which implements a basic per cpu counter using
                     printk("Read : CPU %d, count %ld\n", cpu,
                             local_read(&per_cpu(counters, cpu)));
             }
-            del_timer(&test_timer);
-            test_timer.expires = jiffies + 1000;
-            add_timer(&test_timer);
+            mod_timer(&test_timer, jiffies + 1000);
     }
 
     static int __init test_init(void)
     {
             /* initialize the timer that will increment the counter */
-            init_timer(&test_timer);
-            test_timer.function = do_test_timer;
-            test_timer.expires = jiffies + 1;
-            add_timer(&test_timer);
+            timer_setup(&test_timer, do_test_timer, 0);
+            mod_timer(&test_timer, jiffies + 1);
 
             return 0;
     }
index 37e474ff69115da89dd780475a35d119d5ee4383..94f41c290bfc69eb417066c0cff30f361f1c9bcc 100644 (file)
@@ -33,9 +33,6 @@ of many distributions, e.g. :
 You can get the latest version released from the Coccinelle homepage at
 http://coccinelle.lip6.fr/
 
-Information and tips about Coccinelle are also provided on the wiki
-pages at http://cocci.ekstranet.diku.dk/wiki/doku.php
-
 Once you have it, run the following command::
 
        ./configure
index 44886c91e112d4d21a41e0c4d1a96f37a584aa68..c2f6452e38ed000edc63440ad66b49da63aed481 100644 (file)
@@ -12,19 +12,30 @@ To achieve this goal it does not collect coverage in soft/hard interrupts
 and instrumentation of some inherently non-deterministic parts of kernel is
 disabled (e.g. scheduler, locking).
 
-Usage
------
+kcov is also able to collect comparison operands from the instrumented code
+(this feature currently requires that the kernel is compiled with clang).
+
+Prerequisites
+-------------
 
 Configure the kernel with::
 
         CONFIG_KCOV=y
 
 CONFIG_KCOV requires gcc built on revision 231296 or later.
+
+If the comparison operands need to be collected, set::
+
+       CONFIG_KCOV_ENABLE_COMPARISONS=y
+
 Profiling data will only become accessible once debugfs has been mounted::
 
         mount -t debugfs none /sys/kernel/debug
 
-The following program demonstrates kcov usage from within a test program:
+Coverage collection
+-------------------
+The following program demonstrates coverage collection from within a test
+program using kcov:
 
 .. code-block:: c
 
@@ -44,6 +55,9 @@ The following program demonstrates kcov usage from within a test program:
     #define KCOV_DISABLE                       _IO('c', 101)
     #define COVER_SIZE                 (64<<10)
 
+    #define KCOV_TRACE_PC  0
+    #define KCOV_TRACE_CMP 1
+
     int main(int argc, char **argv)
     {
        int fd;
@@ -64,7 +78,7 @@ The following program demonstrates kcov usage from within a test program:
        if ((void*)cover == MAP_FAILED)
                perror("mmap"), exit(1);
        /* Enable coverage collection on the current thread. */
-       if (ioctl(fd, KCOV_ENABLE, 0))
+       if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
                perror("ioctl"), exit(1);
        /* Reset coverage from the tail of the ioctl() call. */
        __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
@@ -111,3 +125,80 @@ The interface is fine-grained to allow efficient forking of test processes.
 That is, a parent process opens /sys/kernel/debug/kcov, enables trace mode,
 mmaps coverage buffer and then forks child processes in a loop. Child processes
 only need to enable coverage (disable happens automatically on thread end).
+
+Comparison operands collection
+------------------------------
+Comparison operands collection is similar to coverage collection:
+
+.. code-block:: c
+
+    /* Same includes and defines as above. */
+
+    /* Number of 64-bit words per record. */
+    #define KCOV_WORDS_PER_CMP 4
+
+    /*
+     * The format for the types of collected comparisons.
+     *
+     * Bit 0 shows whether one of the arguments is a compile-time constant.
+     * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
+     */
+
+    #define KCOV_CMP_CONST          (1 << 0)
+    #define KCOV_CMP_SIZE(n)        ((n) << 1)
+    #define KCOV_CMP_MASK           KCOV_CMP_SIZE(3)
+
+    int main(int argc, char **argv)
+    {
+       int fd;
+       uint64_t *cover, type, arg1, arg2, ip, is_const, size;
+       unsigned long n, i;
+
+       fd = open("/sys/kernel/debug/kcov", O_RDWR);
+       if (fd == -1)
+               perror("open"), exit(1);
+       if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
+               perror("ioctl"), exit(1);
+       /*
+        * Note that the buffer pointer is of type uint64_t*, because all
+        * the comparison operands are promoted to uint64_t.
+        */
+       cover = (uint64_t *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+                                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+       if ((void*)cover == MAP_FAILED)
+               perror("mmap"), exit(1);
+       /* Note KCOV_TRACE_CMP instead of KCOV_TRACE_PC. */
+       if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP))
+               perror("ioctl"), exit(1);
+       __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
+       read(-1, NULL, 0); /* the syscall whose comparisons are collected */
+       /* Read number of comparisons collected. */
+       n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+       for (i = 0; i < n; i++) {
+               type = cover[i * KCOV_WORDS_PER_CMP + 1];
+               /* arg1 and arg2 - operands of the comparison. */
+               arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
+               arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
+               /* ip - caller address. */
+               ip = cover[i * KCOV_WORDS_PER_CMP + 4];
+               /* size of the operands. */
+               size = 1 << ((type & KCOV_CMP_MASK) >> 1);
+               /* is_const - true if either operand is a compile-time constant. */
+               is_const = type & KCOV_CMP_CONST;
+               printf("ip: 0x%lx type: 0x%lx, arg1: 0x%lx, arg2: 0x%lx, "
+                       "size: %lu, %s\n",
+                       ip, type, arg1, arg2, size,
+                       is_const ? "const" : "non-const");
+       }
+       if (ioctl(fd, KCOV_DISABLE, 0))
+               perror("ioctl"), exit(1);
+       /* Free resources. */
+       if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
+               perror("munmap"), exit(1);
+       if (close(fd))
+               perror("close"), exit(1);
+       return 0;
+    }
+
+Note that the kcov modes (coverage collection or comparison operands) are
+mutually exclusive.
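
The fork-based usage mentioned at the end of the coverage-collection section
can be sketched as follows. This is an outline under the same includes and
defines as the examples above, with error handling omitted and the test body
(run_one_test()) left as a hypothetical placeholder:

    /* Parent: open kcov, size the trace and map the buffer once. */
    int fd = open("/sys/kernel/debug/kcov", O_RDWR);
    unsigned long *cover;

    ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
    cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    for (;;) {
        pid_t pid = fork();

        if (pid == 0) {
            /* Child: only needs to enable collection on its own thread. */
            ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
            __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
            run_one_test();
            /* Collected PCs are in cover[1..cover[0]]; disabling happens
             * automatically when the child's thread exits. */
            _exit(0);
        }
        waitpid(pid, NULL, 0);  /* needs <sys/wait.h> */
    }
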
index cd977db7630c50a6e118e60edd96256efa138ac9..b404d592ce58a6f129b6d652482b85c2ebbab21f 100644 (file)
@@ -7,7 +7,9 @@ Required Properties:
 
 - compatible: Should be one of:
        - "mediatek,mt2701-apmixedsys"
+       - "mediatek,mt2712-apmixedsys", "syscon"
        - "mediatek,mt6797-apmixedsys"
+       - "mediatek,mt7622-apmixedsys"
        - "mediatek,mt8135-apmixedsys"
        - "mediatek,mt8173-apmixedsys"
 - #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
new file mode 100644 (file)
index 0000000..9b8f578
--- /dev/null
@@ -0,0 +1,22 @@
+MediaTek AUDSYS controller
+============================
+
+The MediaTek AUDSYS controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+       - "mediatek,mt7622-audsys", "syscon"
+- #clock-cells: Must be 1
+
+The AUDSYS controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+audsys: audsys@11220000 {
+       compatible = "mediatek,mt7622-audsys", "syscon";
+       reg = <0 0x11220000 0 0x1000>;
+       #clock-cells = <1>;
+};
index 4137196dd686fc0648d3a785c2b3462ac47c11cd..4010e37c53a0218554edfb5885262a168ba73108 100644 (file)
@@ -7,6 +7,7 @@ Required Properties:
 
 - compatible: Should be:
        - "mediatek,mt2701-bdpsys", "syscon"
+       - "mediatek,mt2712-bdpsys", "syscon"
 - #clock-cells: Must be 1
 
 The bdpsys controller uses the common clk binding from
index 768f3a5bc05521a6bd3bc34e1b7311f2366e794f..7aa3fa167668f424464f8f9fe12078ea08f0bc0b 100644 (file)
@@ -7,6 +7,7 @@ Required Properties:
 
 - compatible: Should be:
        - "mediatek,mt2701-ethsys", "syscon"
+       - "mediatek,mt7622-ethsys", "syscon"
 - #clock-cells: Must be 1
 
 The ethsys controller uses the common clk binding from
index beed7b594ceaab959185f97acfa6feff7a7e2e55..f5629d64cef2d2dfa218dabb7646abd50b135a08 100644 (file)
@@ -8,6 +8,7 @@ Required Properties:
 
 - compatible: Should be:
        - "mediatek,mt2701-hifsys", "syscon"
+       - "mediatek,mt7622-hifsys", "syscon"
 - #clock-cells: Must be 1
 
 The hifsys controller uses the common clk binding from
index 047b11ae5f45c0a7e020a231a4e87ce656b3842e..868bd51a98befcb5d901bf67995525e9dceb8173 100644 (file)
@@ -7,6 +7,7 @@ Required Properties:
 
 - compatible: Should be one of:
        - "mediatek,mt2701-imgsys", "syscon"
+       - "mediatek,mt2712-imgsys", "syscon"
        - "mediatek,mt6797-imgsys", "syscon"
        - "mediatek,mt8173-imgsys", "syscon"
 - #clock-cells: Must be 1
index 58d58e2006b83324502d3d39739ab6537552649b..566f153f9f83b29ef7677ea7cd8412486e4154d4 100644 (file)
@@ -8,7 +8,9 @@ Required Properties:
 
 - compatible: Should be one of:
        - "mediatek,mt2701-infracfg", "syscon"
+       - "mediatek,mt2712-infracfg", "syscon"
        - "mediatek,mt6797-infracfg", "syscon"
+       - "mediatek,mt7622-infracfg", "syscon"
        - "mediatek,mt8135-infracfg", "syscon"
        - "mediatek,mt8173-infracfg", "syscon"
 - #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,jpgdecsys.txt
new file mode 100644 (file)
index 0000000..2df799c
--- /dev/null
@@ -0,0 +1,22 @@
+Mediatek jpgdecsys controller
+============================
+
+The Mediatek jpgdecsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+       - "mediatek,mt2712-jpgdecsys", "syscon"
+- #clock-cells: Must be 1
+
+The jpgdecsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+jpgdecsys: syscon@19000000 {
+       compatible = "mediatek,mt2712-jpgdecsys", "syscon";
+       reg = <0 0x19000000 0 0x1000>;
+       #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
new file mode 100644 (file)
index 0000000..b8fb03f
--- /dev/null
@@ -0,0 +1,22 @@
+Mediatek mcucfg controller
+============================
+
+The Mediatek mcucfg controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+       - "mediatek,mt2712-mcucfg", "syscon"
+- #clock-cells: Must be 1
+
+The mcucfg controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+mcucfg: syscon@10220000 {
+       compatible = "mediatek,mt2712-mcucfg", "syscon";
+       reg = <0 0x10220000 0 0x1000>;
+       #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
new file mode 100644 (file)
index 0000000..859e67b
--- /dev/null
@@ -0,0 +1,22 @@
+Mediatek mfgcfg controller
+============================
+
+The Mediatek mfgcfg controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+       - "mediatek,mt2712-mfgcfg", "syscon"
+- #clock-cells: Must be 1
+
+The mfgcfg controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+mfgcfg: syscon@13000000 {
+       compatible = "mediatek,mt2712-mfgcfg", "syscon";
+       reg = <0 0x13000000 0 0x1000>;
+       #clock-cells = <1>;
+};
index 70529e0b58e9a15927a552ada8106434c1627100..4eb8bbe15c01cd4828a75a9acb1d8913e231e515 100644 (file)
@@ -7,6 +7,7 @@ Required Properties:
 
 - compatible: Should be one of:
        - "mediatek,mt2701-mmsys", "syscon"
+       - "mediatek,mt2712-mmsys", "syscon"
        - "mediatek,mt6797-mmsys", "syscon"
        - "mediatek,mt8173-mmsys", "syscon"
 - #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt
new file mode 100644 (file)
index 0000000..d5d5f12
--- /dev/null
@@ -0,0 +1,22 @@
+MediaTek PCIESYS controller
+============================
+
+The MediaTek PCIESYS controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+       - "mediatek,mt7622-pciesys", "syscon"
+- #clock-cells: Must be 1
+
+The PCIESYS controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+pciesys: pciesys@1a100800 {
+       compatible = "mediatek,mt7622-pciesys", "syscon";
+       reg = <0 0x1a100800 0 0x1000>;
+       #clock-cells = <1>;
+};
index e494366782aaf79e1a092be3dd701dd2c22b5a37..fb58ca8c2770b5924baf3b624fb6828260553a0d 100644 (file)
@@ -8,6 +8,8 @@ Required Properties:
 
 - compatible: Should be one of:
        - "mediatek,mt2701-pericfg", "syscon"
+       - "mediatek,mt2712-pericfg", "syscon"
+       - "mediatek,mt7622-pericfg", "syscon"
        - "mediatek,mt8135-pericfg", "syscon"
        - "mediatek,mt8173-pericfg", "syscon"
 - #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
new file mode 100644 (file)
index 0000000..d113b8e
--- /dev/null
@@ -0,0 +1,22 @@
+MediaTek SGMIISYS controller
+============================
+
+The MediaTek SGMIISYS controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+       - "mediatek,mt7622-sgmiisys", "syscon"
+- #clock-cells: Must be 1
+
+The SGMIISYS controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+sgmiisys: sgmiisys@1b128000 {
+       compatible = "mediatek,mt7622-sgmiisys", "syscon";
+       reg = <0 0x1b128000 0 0x1000>;
+       #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt
new file mode 100644 (file)
index 0000000..0076001
--- /dev/null
@@ -0,0 +1,22 @@
+MediaTek SSUSBSYS controller
+============================
+
+The MediaTek SSUSBSYS controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be:
+       - "mediatek,mt7622-ssusbsys", "syscon"
+- #clock-cells: Must be 1
+
+The SSUSBSYS controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+ssusbsys: ssusbsys@1a000000 {
+       compatible = "mediatek,mt7622-ssusbsys", "syscon";
+       reg = <0 0x1a000000 0 0x1000>;
+       #clock-cells = <1>;
+};
index ec93ecbb9f3c2fb72bf461d8d57275d8c57ac79a..24014a7e2332370202fcbf2ffc31ede87df571f8 100644 (file)
@@ -7,7 +7,9 @@ Required Properties:
 
 - compatible: Should be one of:
        - "mediatek,mt2701-topckgen"
+       - "mediatek,mt2712-topckgen", "syscon"
        - "mediatek,mt6797-topckgen"
+       - "mediatek,mt7622-topckgen"
        - "mediatek,mt8135-topckgen"
        - "mediatek,mt8173-topckgen"
 - #clock-cells: Must be 1
index d150104f928a4f7c023a724a3e646eb11b49d83a..ea40d05089f8306b2b15e2b0cd304ef61b8f8602 100644 (file)
@@ -7,6 +7,7 @@ Required Properties:
 
 - compatible: Should be one of:
        - "mediatek,mt2701-vdecsys", "syscon"
+       - "mediatek,mt2712-vdecsys", "syscon"
        - "mediatek,mt6797-vdecsys", "syscon"
        - "mediatek,mt8173-vdecsys", "syscon"
 - #clock-cells: Must be 1
index 8a93be643647d429c39ba3c978e6e1a02f94d1c4..851545357e94af7f3ec4a96ac882cded0d166d7e 100644 (file)
@@ -6,6 +6,7 @@ The Mediatek vencsys controller provides various clocks to the system.
 Required Properties:
 
 - compatible: Should be one of:
+       - "mediatek,mt2712-vencsys", "syscon"
        - "mediatek,mt6797-vencsys", "syscon"
        - "mediatek,mt8173-vencsys", "syscon"
 - #clock-cells: Must be 1
index 2cba012f5af07690f9019ebc4e8097d60d5a69c6..6030afb10b5c15558dfd27ed40f55d4f12171ec6 100644 (file)
@@ -33,6 +33,12 @@ Required Properties:
 - clock-names: Aliases for the above clocks. They should be "pll_ref",
   "pll_in", "cdclk", "sclk_audio", and "sclk_pcm_in" respectively.
 
+Optional Properties:
+
+  - power-domains: a phandle to respective power domain node as described by
+    generic PM domain bindings (see power/power_domain.txt for more
+    information).
+
 The following is the list of clocks generated by the controller. Each clock is
 assigned an identifier and client nodes use this identifier to specify the
 clock which they consume. Some of the clocks are available only on a particular
index f5a5b19ed3b23bfd11631b9c14641e2ba3c484e3..bc61c952cb0b7221ccd47c099199d06598daf91e 100644 (file)
@@ -41,3 +41,46 @@ Example 2: UART controller node that consumes the clock generated by the clock
                clocks = <&clock CLK_UART2>, <&clock CLK_SCLK_UART2>;
                clock-names = "uart", "clk_uart_baud0";
        };
+
+Exynos4412 SoC contains some additional clocks for FIMC-ISP (Camera ISP)
+subsystem. Registers for those clocks are located in the ISP power domain.
+Because those registers are also located in a different memory region than
+the main clock controller, a separate clock controller has to be defined for
+handling them.
+
+Required Properties:
+
+- compatible: should be "samsung,exynos4412-isp-clock".
+
+- reg: physical base address of the ISP clock controller and length of memory
+  mapped region.
+
+- #clock-cells: should be 1.
+
+- clocks: list of the clock controller input clock identifiers,
+  from common clock bindings, should point to CLK_ACLK200 and
+  CLK_ACLK400_MCUISP clocks from the main clock controller.
+
+- clock-names: list of the clock controller input clock names,
+  as described in clock-bindings.txt, should be "aclk200" and
+  "aclk400_mcuisp".
+
+- power-domains: a phandle to ISP power domain node as described by
+  generic PM domain bindings.
+
+Example 3: The clock controllers bindings for Exynos4412 SoCs.
+
+       clock: clock-controller@10030000 {
+               compatible = "samsung,exynos4412-clock";
+               reg = <0x10030000 0x18000>;
+               #clock-cells = <1>;
+       };
+
+       isp_clock: clock-controller@10048000 {
+               compatible = "samsung,exynos4412-isp-clock";
+               reg = <0x10048000 0x1000>;
+               #clock-cells = <1>;
+               power-domains = <&pd_isp>;
+               clocks = <&clock CLK_ACLK200>, <&clock CLK_ACLK400_MCUISP>;
+               clock-names = "aclk200", "aclk400_mcuisp";
+       };
index fe885abc9cb44874b2e7b7d42d6118a7a0828533..c473dd38dd550ccc03d9fc219095dc9257007376 100644 (file)
@@ -168,6 +168,11 @@ Required Properties:
                - aclk_cam1_400
                - aclk_cam1_552
 
+Optional properties:
+  - power-domains: a phandle to respective power domain node as described by
+       generic PM domain bindings (see power/power_domain.txt for more
+       information).
+
 Each clock is assigned an identifier and client nodes can use this identifier
 to specify the clock which they consume.
 
@@ -270,6 +275,7 @@ Example 2: Examples of clock controller nodes are listed below.
                clocks = <&xxti>,
                       <&cmu_top CLK_ACLK_G2D_266>,
                       <&cmu_top CLK_ACLK_G2D_400>;
+               power-domains = <&pd_g2d>;
        };
 
        cmu_disp: clock-controller@13b90000 {
@@ -295,6 +301,7 @@ Example 2: Examples of clock controller nodes are listed below.
                       <&cmu_mif CLK_SCLK_DECON_ECLK_DISP>,
                       <&cmu_mif CLK_SCLK_DECON_TV_VCLK_DISP>,
                       <&cmu_mif CLK_ACLK_DISP_333>;
+               power-domains = <&pd_disp>;
        };
 
        cmu_aud: clock-controller@114c0000 {
@@ -304,6 +311,7 @@ Example 2: Examples of clock controller nodes are listed below.
 
                clock-names = "oscclk", "fout_aud_pll";
                clocks = <&xxti>, <&cmu_top CLK_FOUT_AUD_PLL>;
+               power-domains = <&pd_aud>;
        };
 
        cmu_bus0: clock-controller@13600000 {
@@ -340,6 +348,7 @@ Example 2: Examples of clock controller nodes are listed below.
 
                clock-names = "oscclk", "aclk_g3d_400";
                clocks = <&xxti>, <&cmu_top CLK_ACLK_G3D_400>;
+               power-domains = <&pd_g3d>;
        };
 
        cmu_gscl: clock-controller@13cf0000 {
@@ -353,6 +362,7 @@ Example 2: Examples of clock controller nodes are listed below.
                clocks = <&xxti>,
                        <&cmu_top CLK_ACLK_GSCL_111>,
                        <&cmu_top CLK_ACLK_GSCL_333>;
+               power-domains = <&pd_gscl>;
        };
 
        cmu_apollo: clock-controller@11900000 {
@@ -384,6 +394,7 @@ Example 2: Examples of clock controller nodes are listed below.
                clocks = <&xxti>,
                       <&cmu_top CLK_SCLK_JPEG_MSCL>,
                       <&cmu_top CLK_ACLK_MSCL_400>;
+               power-domains = <&pd_mscl>;
        };
 
        cmu_mfc: clock-controller@15280000 {
@@ -393,6 +404,7 @@ Example 2: Examples of clock controller nodes are listed below.
 
                clock-names = "oscclk", "aclk_mfc_400";
                clocks = <&xxti>, <&cmu_top CLK_ACLK_MFC_400>;
+               power-domains = <&pd_mfc>;
        };
 
        cmu_hevc: clock-controller@14f80000 {
@@ -402,6 +414,7 @@ Example 2: Examples of clock controller nodes are listed below.
 
                clock-names = "oscclk", "aclk_hevc_400";
                clocks = <&xxti>, <&cmu_top CLK_ACLK_HEVC_400>;
+               power-domains = <&pd_hevc>;
        };
 
        cmu_isp: clock-controller@146d0000 {
@@ -415,6 +428,7 @@ Example 2: Examples of clock controller nodes are listed below.
                clocks = <&xxti>,
                       <&cmu_top CLK_ACLK_ISP_DIS_400>,
                       <&cmu_top CLK_ACLK_ISP_400>;
+               power-domains = <&pd_isp>;
        };
 
        cmu_cam0: clock-controller@120d0000 {
@@ -430,6 +444,7 @@ Example 2: Examples of clock controller nodes are listed below.
                       <&cmu_top CLK_ACLK_CAM0_333>,
                       <&cmu_top CLK_ACLK_CAM0_400>,
                       <&cmu_top CLK_ACLK_CAM0_552>;
+               power-domains = <&pd_cam0>;
        };
 
        cmu_cam1: clock-controller@145d0000 {
@@ -451,6 +466,7 @@ Example 2: Examples of clock controller nodes are listed below.
                       <&cmu_top CLK_ACLK_CAM1_333>,
                       <&cmu_top CLK_ACLK_CAM1_400>,
                       <&cmu_top CLK_ACLK_CAM1_552>;
+               power-domains = <&pd_cam1>;
        };
 
 Example 3: UART controller node that consumes the clock generated by the clock
index a7235e9e1c97d38fdc9ff95a036c403dfb583b28..4491d1c104aacd3b41e73856338c81afc9722f36 100644 (file)
@@ -10,12 +10,23 @@ Required properties :
 - compatible : shall contain only one of the following. The generic
                compatible "qcom,rpmcc" should be also included.
 
+                       "qcom,rpmcc-msm8660", "qcom,rpmcc"
+                       "qcom,rpmcc-apq8060", "qcom,rpmcc"
                        "qcom,rpmcc-msm8916", "qcom,rpmcc"
                        "qcom,rpmcc-msm8974", "qcom,rpmcc"
                        "qcom,rpmcc-apq8064", "qcom,rpmcc"
+                       "qcom,rpmcc-msm8996", "qcom,rpmcc"
 
 - #clock-cells : shall contain 1
 
+The clock enumerators are defined in <dt-bindings/clock/qcom,rpmcc.h>
+and come in pairs: FOO_CLK followed by FOO_A_CLK. The latter clock
+is an "active" clock, which means that the consumer only care that the
+clock is available when the apps CPU subsystem is active, i.e. not
+suspended or in deep idle. If it is important that the clock keeps running
+during system suspend, you need to specify the non-active clock, the one
+not containing *_A_* in the enumerator name.
+
 Example:
        smd {
                compatible = "qcom,smd";
index 316e136865688b7c4fe0c6f1d7b23934d66634d1..f1890d0777a6cc25462d308311b5722a19c21c4d 100644 (file)
@@ -22,6 +22,7 @@ Required Properties:
       - "renesas,r8a7794-cpg-mssr" for the r8a7794 SoC (R-Car E2)
       - "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC (R-Car H3)
       - "renesas,r8a7796-cpg-mssr" for the r8a7796 SoC (R-Car M3-W)
+      - "renesas,r8a77970-cpg-mssr" for the r8a77970 SoC (R-Car V3M)
       - "renesas,r8a77995-cpg-mssr" for the r8a77995 SoC (R-Car D3)
 
   - reg: Base address and length of the memory resource used by the CPG/MSSR
@@ -31,8 +32,8 @@ Required Properties:
     clock-names
   - clock-names: List of external parent clock names. Valid names are:
       - "extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7792, r8a7793, r8a7794,
-                r8a7795, r8a7796, r8a77995)
-      - "extalr" (r8a7795, r8a7796)
+                r8a7795, r8a7796, r8a77970, r8a77995)
+      - "extalr" (r8a7795, r8a7796, r8a77970)
       - "usb_extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7793, r8a7794)
 
   - #clock-cells: Must be 2
index bb5d942075fbf0bf18fb9d924f34e3508fcf4298..8ff3e2774ed8d1d0fd46cbf5895346f62252c5b0 100644 (file)
@@ -1,6 +1,6 @@
-* Renesas RZ Clock Pulse Generator (CPG)
+* Renesas RZ/A1 Clock Pulse Generator (CPG)
 
-The CPG generates core clocks for the RZ SoCs. It includes the PLL, variable
+The CPG generates core clocks for the RZ/A1 SoCs. It includes the PLL, variable
 CPU and GPU clocks, and several fixed ratio dividers.
 The CPG also provides a Clock Domain for SoC devices, in combination with the
 CPG Module Stop (MSTP) Clocks.
diff --git a/Documentation/devicetree/bindings/display/google,goldfish-fb.txt b/Documentation/devicetree/bindings/display/google,goldfish-fb.txt
new file mode 100644 (file)
index 0000000..751fa9f
--- /dev/null
@@ -0,0 +1,17 @@
+Android Goldfish framebuffer
+
+Android Goldfish framebuffer device used by Android emulator.
+
+Required properties:
+
+- compatible : should contain "google,goldfish-fb"
+- reg        : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+       display-controller@1f008000 {
+               compatible = "google,goldfish-fb";
+               interrupts = <0x10>;
+               reg = <0x1f008000 0x100>;
+       };
index f79854783c2c342655b37e1ffc31cb3cb3f2a02e..5bf77f6dd19db0ea3eab3cb3f837f54089c2a547 100644 (file)
@@ -129,7 +129,7 @@ Optional properties:
 
 example:
 
-display@di0 {
+disp0 {
        compatible = "fsl,imx-parallel-display";
        edid = [edid-data];
        interface-pix-fmt = "rgb24";
index f248056da24cf87009861f9db7650955049278d6..bb2075df9b3826dd813dd23567ac470ffed5144e 100644 (file)
@@ -1,7 +1,9 @@
 * Cadence Quad SPI controller
 
 Required properties:
-- compatible : Should be "cdns,qspi-nor".
+- compatible : should be one of the following:
+       Generic default - "cdns,qspi-nor".
+       For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor".
 - reg : Contains two entries, each of which is a tuple consisting of a
        physical address and length. The first entry is the address and
        length of the controller register set. The second entry is the
@@ -14,6 +16,9 @@ Required properties:
 
 Optional properties:
 - cdns,is-decoded-cs : Flag to indicate whether decoder is used or not.
+- cdns,rclk-en : Flag to indicate that QSPI return clock is used to latch
+  the read data rather than the QSPI clock. Make sure that QSPI return
+  clock is populated on the board before using this property.
 
 Optional subnodes:
 Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional
index 504291d2e5c2e5e02b880738c7f453b527dee63c..0ee8edb60efc6d07e106314b57fa1cbaa943ed4d 100644 (file)
@@ -29,7 +29,7 @@ nand: nand@ff900000 {
        #address-cells = <1>;
        #size-cells = <1>;
        compatible = "altr,socfpga-denali-nand";
-       reg = <0xff900000 0x100000>, <0xffb80000 0x10000>;
+       reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
        reg-names = "nand_data", "denali_reg";
        interrupts = <0 144 4>;
 };
index 4cab5d85cf6f8eaf11cdf5b90b18cdcc3def4626..376fa2f50e6bc9b41052928037acd4b3a382d380 100644 (file)
@@ -14,6 +14,7 @@ Required properties:
                  at25df641
                  at26df081a
                  en25s64
+                 mr25h128
                  mr25h256
                  mr25h10
                  mr25h40
index 840f9405dcf0736cc294c6432b8176421e34d609..56d3668e2c50e808fca1274436d151951f4ee6a1 100644 (file)
@@ -1,13 +1,16 @@
 * Serial NOR flash controller for MTK MT81xx (and similar)
 
 Required properties:
-- compatible:    The possible values are:
-                 "mediatek,mt2701-nor"
-                 "mediatek,mt7623-nor"
+- compatible:    For mt8173, compatible should be "mediatek,mt8173-nor",
+                 and it is the fallback compatible for other SoCs.
+                 For every other SoC, should contain both the SoC-specific compatible
+                 string and "mediatek,mt8173-nor".
+                 The possible values are:
+                 "mediatek,mt2701-nor", "mediatek,mt8173-nor"
+                 "mediatek,mt2712-nor", "mediatek,mt8173-nor"
+                 "mediatek,mt7622-nor", "mediatek,mt8173-nor"
+                 "mediatek,mt7623-nor", "mediatek,mt8173-nor"
                  "mediatek,mt8173-nor"
-                 For mt8173, compatible should be "mediatek,mt8173-nor".
-                 For every other SoC, should contain both the SoC-specific compatible string
-                 and "mediatek,mt8173-nor".
 - reg:                   physical base address and length of the controller's register
 - clocks:        the phandle of the clocks needed by the nor controller
 - clock-names:           the names of the clocks
index d9b655f110489ba0be1877d6b0338066a1a767a6..d4ee4da584633c95e8ec584ae8e0644d5d05039b 100644 (file)
@@ -5,9 +5,13 @@ Required properties:
  - compatible:         Should be set to one of the following:
                        marvell,pxa3xx-nand
                        marvell,armada370-nand
+                       marvell,armada-8k-nand
  - reg:                The register base for the controller
  - interrupts:         The interrupt to map
  - #address-cells:     Set to <1> if the node includes partitions
+ - marvell,system-controller: Set to retrieve the syscon node that handles
+                       NAND controller related registers (only required
+                       with marvell,armada-8k-nand compatible).
 
 Optional properties:
 
index 7e94b802395d677ec946c874ffaf58969d209a41..74c1180159809e3886caacabef528da0578256a0 100644 (file)
@@ -9,6 +9,7 @@ Required Properties:
  - "renesas,pwm-r8a7794": for R-Car E2
  - "renesas,pwm-r8a7795": for R-Car H3
  - "renesas,pwm-r8a7796": for R-Car M3-W
+ - "renesas,pwm-r8a77995": for R-Car D3
 - reg: base address and length of the registers block for the PWM.
 - #pwm-cells: should be 2. See pwm.txt in this directory for a description of
   the cells format.
index 7ff3f7903f267c9dd8d20983d461c7f344ca7ba6..00d3d58a102fe6da2ed0b70b7ce2d61772b17822 100644 (file)
@@ -10,6 +10,7 @@ on the Qualcomm Hexagon core.
                    "qcom,q6v5-pil",
                    "qcom,msm8916-mss-pil",
                    "qcom,msm8974-mss-pil"
+                   "qcom,msm8996-mss-pil"
 
 - reg:
        Usage: required
index 323cf26374cb14dff4284fb8cdbe4e27270b8107..c797bc9d77d2296a225c8d6da9922fb187243f9a 100644 (file)
@@ -1,20 +1,20 @@
 * i.MX25 Real Time Clock controller
 
-This binding supports the following chips: i.MX25, i.MX53
-
 Required properties:
 - compatible: should be: "fsl,imx25-rtc"
 - reg: physical base address of the controller and length of memory mapped
   region.
+- clocks: should contain the phandle for the rtc clock
 - interrupts: rtc alarm interrupt
 
 Optional properties:
-- interrupts: dryice security violation interrupt
+- interrupts: dryice security violation interrupt (second entry)
 
 Example:
 
-rtc@80056000 {
-       compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
-       reg = <0x80056000 2000>;
-       interrupts = <29 56>;
+rtc@53ffc000 {
+       compatible = "fsl,imx25-rtc";
+       reg = <0x53ffc000 0x4000>;
+       clocks = <&clks 81>;
+       interrupts = <25 56>;
 };
diff --git a/Documentation/devicetree/bindings/rtc/pcf85363.txt b/Documentation/devicetree/bindings/rtc/pcf85363.txt
new file mode 100644 (file)
index 0000000..76fdabc
--- /dev/null
@@ -0,0 +1,17 @@
+NXP PCF85363 Real Time Clock
+============================
+
+Required properties:
+- compatible: Should contain "nxp,pcf85363".
+- reg: I2C address for chip.
+
+Optional properties:
+- interrupts: IRQ line for the RTC (not implemented).
+
+Example:
+
+pcf85363: pcf85363@51 {
+       compatible = "nxp,pcf85363";
+       reg = <0x51>;
+};
+
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt b/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
new file mode 100644 (file)
index 0000000..09fe8f5
--- /dev/null
@@ -0,0 +1,21 @@
+Device-Tree bindings for MediaTek SoC based RTC
+
+Required properties:
+- compatible       : Should be
+                       "mediatek,mt7622-rtc", "mediatek,soc-rtc" : for MT7622 SoC
+- reg              : Specifies base physical address and size of the registers;
+- interrupts       : Should contain the interrupt for RTC alarm;
+- clocks           : Specifies list of clock specifiers, corresponding to
+                     entries in clock-names property;
+- clock-names      : Should contain "rtc" entries
+
+Example:
+
+rtc: rtc@10212800 {
+       compatible = "mediatek,mt7622-rtc",
+                    "mediatek,soc-rtc";
+       reg = <0 0x10212800 0 0x200>;
+       interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>;
+       clocks = <&topckgen CLK_TOP_RTC>;
+       clock-names = "rtc";
+};
diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt
new file mode 100644 (file)
index 0000000..7c170da
--- /dev/null
@@ -0,0 +1,27 @@
+Spreadtrum SC27xx Real Time Clock
+
+Required properties:
+- compatible: should be "sprd,sc2731-rtc".
+- reg: address offset of rtc register.
+- interrupt-parent: phandle for the interrupt controller.
+- interrupts: rtc alarm interrupt.
+
+Example:
+
+       sc2731_pmic: pmic@0 {
+               compatible = "sprd,sc2731";
+               reg = <0>;
+               spi-max-frequency = <26000000>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               rtc@280 {
+                       compatible = "sprd,sc2731-rtc";
+                       reg = <0x280>;
+                       interrupt-parent = <&sc2731_pmic>;
+                       interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+               };
+       };
index b277eca861f72a45c7fa0bbd2d6d35656d730d39..9663cab5224626cd74a6ca0a724c1be09e0bd38c 100644 (file)
@@ -39,6 +39,14 @@ of these nodes are defined by the individual bindings for the specific function
        Definition: a list of channels tied to this function, used for matching
                    the function to a set of virtual channels
 
+- qcom,intents:
+       Usage: optional
+       Value type: <prop-encoded-array>
+       Definition: a list of size,amount pairs describing what intents should
+                   be preallocated for this virtual channel. This can be used
+                   to tweak the default intents available for the channel to
+                   meet expectations of the remote.
+
 = EXAMPLE
 The following example represents the GLINK RPM node on a MSM8996 device, with
 the function for the "rpm_request" channel defined, which is used for
 regulators and root clocks.
                        compatible = "qcom,rpm-msm8996";
                        qcom,glink-channels = "rpm_requests";
 
+                       qcom,intents = <0x400 5
+                                       0x800 1>;
                        ...
                };
        };
index 27dce08edd733b0a4c85cb2ac71baa90edb043b5..5f3143f970983ca92ce1024eaed87f03fa494877 100644 (file)
@@ -55,7 +55,6 @@ epson,rx8010          I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581           I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 emmicro,em3027         EM Microelectronic EM3027 Real-time Clock
 fsl,mag3110            MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
-fsl,mc13892            MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
 fsl,mma7660            MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
 fsl,mma8450            MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
 fsl,mpl3115            MPL3115: Absolute Digital Pressure Sensor
@@ -73,7 +72,6 @@ maxim,ds1050          5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237          Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6621          PECI-to-I2C translator for PECI-to-SMBus/I2C protocol conversion
 maxim,max6625          9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
-mc,rv3029c2            Real Time Clock Module with I2C-Bus
 mcube,mc3230           mCube 3-axis 8-bit digital accelerometer
 memsic,mxc6225         MEMSIC 2-axis 8-bit digital accelerometer
 microchip,mcp4531-502  Microchip 7-bit Single I2C Digital Potentiometer (5k)
@@ -142,6 +140,7 @@ microchip,mcp4662-503       Microchip 8-bit Dual I2C Digital Potentiometer with NV Mem
 microchip,mcp4662-104  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (100k)
 microchip,tc654                PWM Fan Speed Controller With Fan Fault Detection
 microchip,tc655                PWM Fan Speed Controller With Fan Fault Detection
+microcrystal,rv3029    Real Time Clock Module with I2C-Bus
 miramems,da226         MiraMEMS DA226 2-axis 14-bit digital accelerometer
 miramems,da280         MiraMEMS DA280 3-axis 14-bit digital accelerometer
 miramems,da311         MiraMEMS DA311 3-axis 12-bit digital accelerometer
index ce02cebac26afdc80d99600725b3aaec24cb01e9..1b27cebb47f4286f8fac429f1361e6cba240d469 100644 (file)
@@ -4,24 +4,35 @@ Usually, we only use device tree for hard wired USB device.
 The reference binding doc is from:
 http://www.devicetree.org/open-firmware/bindings/usb/usb-1_0.ps
 
+
 Required properties:
-- compatible: usbVID,PID. The textual representation of VID, PID shall
-  be in lower case hexadecimal with leading zeroes suppressed. The
-  other compatible strings from the above standard binding could also
-  be used, but a device adhering to this binding may leave out all except
-  for usbVID,PID.
-- reg: the port number which this device is connecting to, the range
-  is 1-31.
+- compatible: "usbVID,PID", where VID is the vendor id and PID the product id.
+  The textual representation of VID and PID shall be in lower case hexadecimal
+  with leading zeroes suppressed. The other compatible strings from the above
+  standard binding could also be used, but a device adhering to this binding
+  may leave out all except for "usbVID,PID".
+- reg: the number of the USB hub port or the USB host-controller port to which
+  this device is attached. The range is 1-255.
+
+
+Required properties for hub nodes with device nodes:
+- #address-cells: shall be 1
+- #size-cells: shall be 0
 
-Example:
 
-&usb1 {
+Required properties for host-controller nodes with device nodes:
+- #address-cells: shall be 1
+- #size-cells: shall be 0
+
+
+Example:
 
+&usb1 {        /* host controller */
        #address-cells = <1>;
        #size-cells = <0>;
 
-       hub: genesys@1 {
+       hub@1 { /* hub connected to port 1 */
                compatible = "usb5e3,608";
                reg = <1>;
        };
-}
+};
index 69f08c0f23a8eb8706a35f43d338061cdaa61868..c180045eb43b190beccc1072a7285f6f5de5bf34 100644 (file)
@@ -237,6 +237,7 @@ CLOCK
   devm_clk_get()
   devm_clk_put()
   devm_clk_hw_register()
+  devm_of_clk_add_hw_provider()
 
 DMA
   dmam_alloc_coherent()
index ec571b9bb18a9e21ba2d3c9491b9dc398916966a..2a84bb3348947c1c0302ecfaa3a01a21fa558fef 100644 (file)
@@ -181,6 +181,7 @@ read the file /proc/PID/status:
   VmPTE:        20 kb
   VmSwap:        0 kB
   HugetlbPages:          0 kB
+  CoreDumping:    0
   Threads:        1
   SigQ:   0/28578
   SigPnd: 0000000000000000
@@ -253,6 +254,8 @@ Table 1-2: Contents of the status files (as of 4.8)
  VmSwap                      amount of swap used by anonymous private data
                              (shmem swap usage is not included)
  HugetlbPages                size of hugetlb memory portions
+ CoreDumping                 process's memory is currently being dumped
+                             (killing the process may lead to a corrupted core)
  Threads                     number of threads
  SigQ                        number of signals queued/max. number for queue
  SigPnd                      bitmap of pending signals for the thread
index c61a99f7c8bbeefa2d96ef6b9fe3ff5c67e135fb..a12c74ce27734eb78a54b77923e648aaceb03430 100644 (file)
@@ -41,7 +41,7 @@ Getting and Building Xen and Dom0
 
  5. make initrd for Dom0/DomU
     # make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install \
-      O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64
+      O=$(pwd)/build-linux-2.6.18-xen_ia64
     # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img \
       2.6.18.8-xen --builtin mptspi --builtin mptbase \
       --builtin mptscsih --builtin uhci-hcd --builtin ohci-hcd \
index 361789df51ecf58d5083436792588d2f12faa7f2..aa0a776c817a7ceabb217c3eecc31ecdb32f59c7 100644 (file)
@@ -5,7 +5,6 @@ How to get printk format specifiers right
 :Author: Randy Dunlap <rdunlap@infradead.org>
 :Author: Andrew Murray <amurray@mpc-data.co.uk>
 
-
 Integer types
 =============
 
@@ -45,6 +44,18 @@ return from vsnprintf.
 Raw pointer value SHOULD be printed with %p. The kernel supports
 the following extended format specifiers for pointer types:
 
+Pointer Types
+=============
+
+Pointers printed without a specifier extension (i.e. unadorned %p) are
+hashed to give a unique identifier without leaking kernel addresses to user
+space. On 64-bit machines the first 32 bits are zeroed. If you _really_
+want the address, see %px below.
+
+::
+
+       %p      abcdef12 or 00000000abcdef12
+
 Symbols/Function Pointers
 =========================
 
@@ -85,18 +96,32 @@ Examples::
        printk("Faulted at %pS\n", (void *)regs->ip);
        printk(" %s%pB\n", (reliable ? "" : "? "), (void *)*stack);
 
-
 Kernel Pointers
 ===============
 
 ::
 
-       %pK     0x01234567 or 0x0123456789abcdef
+       %pK     01234567 or 0123456789abcdef
 
 For printing kernel pointers which should be hidden from unprivileged
 users. The behaviour of ``%pK`` depends on the ``kptr_restrict sysctl`` - see
 Documentation/sysctl/kernel.txt for more details.
 
+Unmodified Addresses
+====================
+
+::
+
+       %px     01234567 or 0123456789abcdef
+
+For printing pointers when you _really_ want to print the address. Please
+consider whether or not you are leaking sensitive information about the
+Kernel layout in memory before printing pointers with %px. %px is
+functionally equivalent to %lx. %px is preferred to %lx because it is more
+uniquely grep'able. If, in the future, we need to modify the way the Kernel
+handles printing pointers it will be nice to be able to find the call
+sites.
+
 Struct Resources
 ================
 
index 1b7728b19ea7aaef7ca77fa5532a8065e66d414d..645fa9c7388a857607685047a547c1e9ede4a4be 100644 (file)
@@ -213,6 +213,11 @@ The tags in common use are:
    which can be found in Documentation/process/submitting-patches.rst.  Code without a
    proper signoff cannot be merged into the mainline.
 
+ - Co-Developed-by: states that the patch was also created by another developer
+   along with the original author.  This is useful at times when multiple
+   people work on a single patch.  Note that this person also needs to have a
+   Signed-off-by: line in the patch.
+
  - Acked-by: indicates an agreement by another developer (often a
    maintainer of the relevant code) that the patch is appropriate for
    inclusion into the kernel.
index e89e36ec15a5bf6453db0b77031d23a28c705e07..8ce78f82ae23a96d4301a1fd238f94b8ab3a0eac 100644 (file)
@@ -204,10 +204,17 @@ CONTENTS
  It does so by decrementing the runtime of the executing task Ti at a pace equal
  to
 
-           dq = -max{ Ui, (1 - Uinact) } dt
+           dq = -max{ Ui / Umax, (1 - Uinact - Uextra) } dt
 
- where Uinact is the inactive utilization, computed as (this_bq - running_bw),
- and Ui is the bandwidth of task Ti.
+ where:
+
+  - Ui is the bandwidth of task Ti;
+  - Umax is the maximum reclaimable utilization (subject to RT throttling
+    limits);
+  - Uinact is the (per runqueue) inactive utilization, computed as
+    (this_bq - running_bw);
+  - Uextra is the (per runqueue) extra reclaimable utilization
+    (subject to RT throttling limits).
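For illustration only, a small userspace C sketch with made-up utilization values (not taken from any real runqueue) computes this decrement as follows::

        #include <stdio.h>

        int main(void)
        {
                /* Example values, expressed as fractions of CPU capacity */
                double Ui     = 0.25;   /* bandwidth of the running task   */
                double Umax   = 0.95;   /* maximum reclaimable utilization */
                double Uinact = 0.10;   /* inactive utilization            */
                double Uextra = 0.05;   /* extra reclaimable utilization   */
                double dt     = 0.001;  /* 1 ms of wall-clock time         */

                double a  = Ui / Umax;
                double b  = 1.0 - Uinact - Uextra;
                double dq = -((a > b) ? a : b) * dt;

                printf("dq = %f\n", dq);  /* runtime consumed during dt */
                return 0;
        }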
 
 
  Let's now see a trivial example of two deadline tasks with runtime equal
index 1266eeae45f69caeed72902d2507c3131de9a08f..9ce7256c6edba8b605e9928a42159d717f6d7cf5 100644 (file)
@@ -628,12 +628,12 @@ The keyctl syscall functions are:
      defined key type will return its data as is. If a key type does not
      implement this function, error EOPNOTSUPP will result.
 
-     As much of the data as can be fitted into the buffer will be copied to
-     userspace if the buffer pointer is not NULL.
-
-     On a successful return, the function will always return the amount of data
-     available rather than the amount copied.
+     If the specified buffer is too small, then the size of the buffer required
+     will be returned.  Note that in this case, the contents of the buffer may
+     have been overwritten in some undefined way.
 
+     Otherwise, on success, the function will return the amount of data copied
+     into the buffer.
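     A minimal userspace sketch of the resulting calling convention, assuming
     libkeyutils is installed and the caller holds read permission on the key
     (function and variable names here are illustrative only)::

        #include <keyutils.h>
        #include <stdio.h>
        #include <stdlib.h>

        int read_key_payload(key_serial_t key)
        {
                /* First call with no buffer to learn the size required */
                long len = keyctl_read(key, NULL, 0);
                if (len < 0)
                        return -1;

                char *buf = malloc(len);
                if (!buf)
                        return -1;

                /* Second call copies the payload; returns the amount copied */
                long got = keyctl_read(key, buf, len);
                if (got >= 0)
                        printf("read %ld bytes of payload\n", got);

                free(buf);
                return got < 0 ? -1 : 0;
        }

     Link with -lkeyutils.
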
 
   *  Instantiate a partially constructed key::
 
index 119f1515b1acbe693386bf816af524436fa27358..b6c2f9acca92b4f49582934eba45c2b24aeace2c 100644 (file)
@@ -67,8 +67,7 @@ The menu looks like::
 <name-of-detected-video-adapter> tells what video adapter did Linux detect
 -- it's either a generic adapter name (MDA, CGA, HGC, EGA, VGA, VESA VGA [a VGA
 with VESA-compliant BIOS]) or a chipset name (e.g., Trident). Direct detection
-of chipsets is turned off by default (see CONFIG_VIDEO_SVGA in chapter 4 to see
-how to enable it if you really want) as it's inherently unreliable due to
+of chipsets is turned off by default as it's inherently unreliable due to
 absolutely insane PC design.
 
 "0  0F00  80x25" means that the first menu item (the menu items are numbered
@@ -138,7 +137,7 @@ The ID numbers can be divided to those regions::
        0x0f05  VGA 80x30 (480 scans, 16-point font)
        0x0f06  VGA 80x34 (480 scans, 14-point font)
        0x0f07  VGA 80x60 (480 scans, 8-point font)
-       0x0f08  Graphics hack (see the CONFIG_VIDEO_HACK paragraph below)
+       0x0f08  Graphics hack (see the VIDEO_GFX_HACK paragraph below)
 
    0x1000 to 0x7fff - modes specified by resolution. The code has a "0xRRCC"
        form where RR is a number of rows and CC is a number of columns.
@@ -160,58 +159,22 @@ end of the display.
 Options
 ~~~~~~~
 
-Some options can be set in the source text (in arch/i386/boot/video.S).
-All of them are simple #define's -- change them to #undef's when you want to
-switch them off. Currently supported:
-
-CONFIG_VIDEO_SVGA - enables autodetection of SVGA cards. This is switched
-off by default as it's a bit unreliable due to terribly bad PC design. If you
-really want to have the adapter autodetected (maybe in case the ``scan`` feature
-doesn't work on your machine), switch this on and don't cry if the results
-are not completely sane. In case you really need this feature, please drop me
-a mail as I think of removing it some day.
-
-CONFIG_VIDEO_VESA - enables autodetection of VESA modes. If it doesn't work
-on your machine (or displays a "Error: Scanning of VESA modes failed" message),
-you can switch it off and report as a bug.
-
-CONFIG_VIDEO_COMPACT - enables compacting of the video mode list. If there
-are more modes with the same screen size, only the first one is kept (see above
-for more info on mode ordering). However, in very strange cases it's possible
-that the first "version" of the mode doesn't work although some of the others
-do -- in this case turn this switch off to see the rest.
-
-CONFIG_VIDEO_RETAIN - enables retaining of screen contents when switching
-video modes. Works only with some boot loaders which leave enough room for the
-buffer. (If you have old LILO, you can adjust heap_end_ptr and loadflags
-in setup.S, but it's better to upgrade the boot loader...)
-
-CONFIG_VIDEO_LOCAL - enables inclusion of "local modes" in the list. The
-local modes are added automatically to the beginning of the list not depending
-on hardware configuration. The local modes are listed in the source text after
-the "local_mode_table:" line. The comment before this line describes the format
-of the table (which also includes a video card name to be displayed on the
-top of the menu).
-
-CONFIG_VIDEO_400_HACK - force setting of 400 scan lines for standard VGA
-modes. This option is intended to be used on certain buggy BIOSes which draw
-some useless logo using font download and then fail to reset the correct mode.
-Don't use unless needed as it forces resetting the video card.
-
-CONFIG_VIDEO_GFX_HACK - includes special hack for setting of graphics modes
-to be used later by special drivers (e.g., 800x600 on IBM ThinkPad -- see
-ftp://ftp.phys.keio.ac.jp/pub/XFree86/800x600/XF86Configs/XF86Config.IBM_TP560).
+Build options for arch/x86/boot/* are selected by the kernel kconfig
+utility and the kernel .config file.
+
+VIDEO_GFX_HACK - includes a special hack for setting graphics modes
+to be used later by special drivers.
 Allows to set _any_ BIOS mode including graphic ones and forcing specific
 text screen resolution instead of peeking it from BIOS variables. Don't use
 unless you think you know what you're doing. To activate this setup, use
-mode number 0x0f08 (see section 3).
+mode number 0x0f08 (see the Mode IDs section above).
 
 Still doesn't work?
 ~~~~~~~~~~~~~~~~~~~
 
 When the mode detection doesn't work (e.g., the mode list is incorrect or
 the machine hangs instead of displaying the menu), try to switch off some of
-the configuration options listed in section 4. If it fails, you can still use
+the configuration options listed under "Options". If it fails, you can still use
 your kernel with the video mode set directly via the kernel parameter.
 
 In either case, please send me a bug report containing what _exactly_
@@ -228,10 +191,6 @@ contains the most common video BIOS bug called "incorrect vertical display
 end setting". Adding 0x8000 to the mode ID might fix the problem. Unfortunately,
 this must be done manually -- no autodetection mechanisms are available.
 
-If you have a VGA card and your display still looks as on EGA, your BIOS
-is probably broken and you need to set the CONFIG_VIDEO_400_HACK switch to
-force setting of the correct mode.
-
 History
 ~~~~~~~
 
index a0a9c7b3d4d546cc79cdbc39c6555371a4843675..f788264921ffa21415169997225ff40a70469bd7 100644 (file)
@@ -78,3 +78,15 @@ The following IOCTLs are also supported by the device:
   between PCI Function Framework number (used by the event system)
   and Switchtec Logic Port ID and Partition number (which is more
   user friendly).
+
+
+Non-Transparent Bridge (NTB) Driver
+===================================
+
+An NTB driver is provided for the switchtec hardware in switchtec_ntb.
+Currently, it only supports switches configured with exactly 2
+partitions. It also requires the following configuration settings:
+
+* Both partitions must be able to access each other's GAS spaces.
+  Thus, the bits in the GAS Access Vector under Management Settings
+  must be set to support this.
index 055c8b3e101805e3cf66288a914d757efed35a48..5025ff9307e66c590a4a72795f9e2f75f0fbddc8 100644 (file)
@@ -158,10 +158,6 @@ Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
 value lower than this limit will be ignored and the old configuration will be
 retained.
 
-Note: the value of dirty_bytes also must be set greater than
-dirty_background_bytes or the amount of memory corresponding to
-dirty_background_ratio.
-
 ==============================================================
 
 dirty_expire_centisecs
@@ -181,9 +177,6 @@ generating disk writes will itself start writing out dirty data.
 
 The total available memory is not equal to total system memory.
 
-Note: dirty_ratio must be set greater than dirty_background_ratio or
-ratio corresponding to dirty_background_bytes.
-
 ==============================================================
 
 dirty_writeback_centisecs
@@ -818,7 +811,7 @@ tooling to work, you can do:
 swappiness
 
 This control is used to define how aggressive the kernel will swap
-memory pages.  Higher values will increase agressiveness, lower values
+memory pages.  Higher values will increase aggressiveness, lower values
 decrease the amount of swap.  A value of 0 instructs the kernel not to
 initiate swap until the amount of free and file-backed pages is less
 than the high water mark in a zone.
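The value can be changed at run time by writing to the corresponding procfs
file; a tiny C sketch (the value 10 is only an example) would be::

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/proc/sys/vm/swappiness", "w");

                if (!f) {
                        perror("swappiness");
                        return 1;
                }
                fprintf(f, "10\n");  /* prefer reclaiming file-backed pages */
                fclose(f);
                return 0;
        }
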
index ec3b46e27b7aa3aeca361d169bfda8ea09bf91fb..0a0930ab415668a97ef649931f4af58c6fb2e2c1 100644 (file)
@@ -82,7 +82,7 @@ Documentation/memory-barriers.txt
      - SMP 배리어 짝맞추기.
      - 메모리 배리어 시퀀스의 예.
      - 읽기 메모리 배리어 vs 로드 예측.
-     - 이행성
+     - Multicopy 원자성.
 
  (*) 명시적 커널 배리어.
 
@@ -656,6 +656,11 @@ Documentation/RCU/rcu_dereference.txt 파일을 주의 깊게 읽어 주시기 
 해줍니다.
 
 
+데이터 의존성에 의해 제공되는 이 순서규칙은 이를 포함하고 있는 CPU 에
+지역적임을 알아두시기 바랍니다.  더 많은 정보를 위해선 "Multicopy 원자성"
+섹션을 참고하세요.
+
+
 데이터 의존성 배리어는 매우 중요한데, 예를 들어 RCU 시스템에서 그렇습니다.
 include/linux/rcupdate.h 의 rcu_assign_pointer() 와 rcu_dereference() 를
 참고하세요.  여기서 데이터 의존성 배리어는 RCU 로 관리되는 포인터의 타겟을 현재
@@ -864,38 +869,10 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레
 주어진 if 문의 then 절과 else 절에게만 (그리고 이 두 절 내에서 호출되는
 함수들에게까지) 적용되지, 이 if 문을 뒤따르는 코드에는 적용되지 않습니다.
 
-마지막으로, 컨트롤 의존성은 이행성 (transitivity) 을 제공하지 -않습니다-.  이건
-'x' 와 'y' 가 둘 다 0 이라는 초기값을 가졌다는 가정 하의 두개의 예제로
-보이겠습니다:
-
-       CPU 0                     CPU 1
-       =======================   =======================
-       r1 = READ_ONCE(x);        r2 = READ_ONCE(y);
-       if (r1 > 0)               if (r2 > 0)
-         WRITE_ONCE(y, 1);         WRITE_ONCE(x, 1);
-
-       assert(!(r1 == 1 && r2 == 1));
-
-이 두 CPU 예제에서 assert() 의 조건은 항상 참일 것입니다.  그리고, 만약 컨트롤
-의존성이 이행성을 (실제로는 그러지 않지만) 보장한다면, 다음의 CPU 가 추가되어도
-아래의 assert() 조건은 참이 될것입니다:
 
-       CPU 2
-       =====================
-       WRITE_ONCE(x, 2);
+컨트롤 의존성에 의해 제공되는 이 순서규칙은 이를 포함하고 있는 CPU 에
+지역적입니다.  더 많은 정보를 위해선 "Multicopy 원자성" 섹션을 참고하세요.
 
-       assert(!(r1 == 2 && r2 == 1 && x == 2)); /* FAILS!!! */
-
-하지만 컨트롤 의존성은 이행성을 제공하지 -않기- 때문에, 세개의 CPU 예제가 실행
-완료된 후에 위의 assert() 의 조건은 거짓으로 평가될 수 있습니다.  세개의 CPU
-예제가 순서를 지키길 원한다면, CPU 0 와 CPU 1 코드의 로드와 스토어 사이, "if"
-문 바로 다음에 smp_mb()를 넣어야 합니다.  더 나아가서, 최초의 두 CPU 예제는
-매우 위험하므로 사용되지 않아야 합니다.
-
-이 두개의 예제는 다음 논문:
-http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf 와
-이 사이트: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html 에 나온 LB 와 WWC
-리트머스 테스트입니다.
 
 요약하자면:
 
@@ -930,8 +907,8 @@ http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf 와
 
   (*) 컨트롤 의존성은 보통 다른 타입의 배리어들과 짝을 맞춰 사용됩니다.
 
-  (*) 컨트롤 의존성은 이행성을 제공하지 -않습니다-.  이행성이 필요하다면,
-      smp_mb() 를 사용하세요.
+  (*) 컨트롤 의존성은 multicopy 원자성을 제공하지 -않습니다-.  모든 CPU 들이
+      특정 스토어를 동시에 보길 원한다면, smp_mb() 를 사용하세요.
 
   (*) 컴파일러는 컨트롤 의존성을 이해하고 있지 않습니다.  따라서 컴파일러가
       여러분의 코드를 망가뜨리지 않도록 하는건 여러분이 해야 하는 일입니다.
@@ -943,13 +920,14 @@ SMP 배리어 짝맞추기
 CPU 간 상호작용을 다룰 때에 일부 타입의 메모리 배리어는 항상 짝을 맞춰
 사용되어야 합니다.  적절하게 짝을 맞추지 않은 코드는 사실상 에러에 가깝습니다.
 
-범용 배리어들은 범용 배리어끼리도 짝을 맞추지만 이행성이 없는 대부분의 다른
-타입의 배리어들과도 짝을 맞춥니다.  ACQUIRE 배리어는 RELEASE 배리어와 짝을
-맞춥니다만, 둘 다 범용 배리어를 포함해 다른 배리어들과도 짝을 맞출 수 있습니다.
-쓰기 배리어는 데이터 의존성 배리어나 컨트롤 의존성, ACQUIRE 배리어, RELEASE
-배리어, 읽기 배리어, 또는 범용 배리어와 짝을 맞춥니다.  비슷하게 읽기 배리어나
-컨트롤 의존성, 또는 데이터 의존성 배리어는 쓰기 배리어나 ACQUIRE 배리어,
-RELEASE 배리어, 또는 범용 배리어와 짝을 맞추는데, 다음과 같습니다:
+범용 배리어들은 범용 배리어끼리도 짝을 맞추지만 multicopy 원자성이 없는
+대부분의 다른 타입의 배리어들과도 짝을 맞춥니다.  ACQUIRE 배리어는 RELEASE
+배리어와 짝을 맞춥니다만, 둘 다 범용 배리어를 포함해 다른 배리어들과도 짝을
+맞출 수 있습니다.  쓰기 배리어는 데이터 의존성 배리어나 컨트롤 의존성, ACQUIRE
+배리어, RELEASE 배리어, 읽기 배리어, 또는 범용 배리어와 짝을 맞춥니다.
+비슷하게 읽기 배리어나 컨트롤 의존성, 또는 데이터 의존성 배리어는 쓰기 배리어나
+ACQUIRE 배리어, RELEASE 배리어, 또는 범용 배리어와 짝을 맞추는데, 다음과
+같습니다:
 
        CPU 1                 CPU 2
        ===============       ===============
@@ -975,7 +953,7 @@ RELEASE 배리어, 또는 범용 배리어와 짝을 맞추는데, 다음과 같
        ===============       ===============================
        r1 = READ_ONCE(y);
        <범용 배리어>
-       WRITE_ONCE(y, 1);     if (r2 = READ_ONCE(x)) {
+       WRITE_ONCE(x, 1);     if (r2 = READ_ONCE(x)) {
                                 <묵시적 컨트롤 의존성>
                                 WRITE_ONCE(y, 1);
                              }
@@ -1361,57 +1339,74 @@ A 의 로드 두개가 모두 B 의 로드 뒤에 있지만, 서로 다른 값
                                                :       :       +-------+
 
 
-이행
-------
+MULTICOPY 원자성
+----------------
 
-이행성(transitivity)은 실제의 컴퓨터 시스템에서 항상 제공되지는 않는, 순서
-맞추기에 대한 상당히 직관적인 개념입니다.  다음의 예가 이행성을 보여줍니다:
+Multicopy 원자성은 실제의 컴퓨터 시스템에서 항상 제공되지는 않는, 순서 맞추기에
+대한 상당히 직관적인 개념으로, 특정 스토어가 모든 CPU 들에게 동시에 보여지게
+됨을, 달리 말하자면 모든 CPU 들이 모든 스토어들이 보여지는 순서를 동의하게 되는
+것입니다.  하지만, 완전한 multicopy 원자성의 사용은 가치있는 하드웨어
+최적화들을 무능하게 만들어버릴 수 있어서, 보다 완화된 형태의 ``다른 multicopy
+원자성'' 라는 이름의, 특정 스토어가 모든 -다른- CPU 들에게는 동시에 보여지게
+하는 보장을 대신 제공합니다.  이 문서의 뒷부분들은 이 완화된 형태에 대해 논하게
+됩니다만, 단순히 ``multicopy 원자성'' 이라고 부르겠습니다.
+
+다음의 예가 multicopy 원자성을 보입니다:
 
        CPU 1                   CPU 2                   CPU 3
        ======================= ======================= =======================
                { X = 0, Y = 0 }
-       STORE X=1               LOAD X                  STORE Y=1
-                               <범용 배리어>              <범용 배리어>
-                               LOAD Y                  LOAD X
-
-CPU 2 의 X 로드가 1을 리턴했고 Y 로드가 0을 리턴했다고 해봅시다.  이는 CPU 2 의
-X 로드가 CPU 1 의 X 스토어 뒤에 이루어졌고 CPU 2 의 Y 로드는 CPU 3 의 Y 스토어
-전에 이루어졌음을 의미합니다.  그럼 "CPU 3 의 X 로드는 0을 리턴할 수 있나요?"
-
-CPU 2 의 X 로드는 CPU 1 의 스토어 후에 이루어졌으니, CPU 3 의 X 로드는 1을
-리턴하는게 자연스럽습니다.  이런 생각이 이행성의 한 예입니다: CPU A 에서 실행된
-로드가 CPU B 에서의 같은 변수에 대한 로드를 뒤따른다면, CPU A 의 로드는 CPU B
-의 로드가 내놓은 값과 같거나 그 후의 값을 내놓아야 합니다.
-
-리눅스 커널에서 범용 배리어의 사용은 이행성을 보장합니다.  따라서, 앞의 예에서
-CPU 2 의 X 로드가 1을, Y 로드는 0을 리턴했다면, CPU 3 의 X 로드는 반드시 1을
-리턴합니다.
-
-하지만, 읽기나 쓰기 배리어에 대해서는 이행성이 보장되지 -않습니다-.  예를 들어,
-앞의 예에서 CPU 2 의 범용 배리어가 아래처럼 읽기 배리어로 바뀐 경우를 생각해
-봅시다:
+       STORE X=1               r1=LOAD X (reads 1)     LOAD Y (reads 1)
+                               <범용 배리어>              <읽기 배리어>
+                               STORE Y=r1              LOAD X
+
+CPU 2 의 Y 로의 스토어에 사용되는 X 로드의 결과가 1 이었고 CPU 3 의 Y 로드가
+1을 리턴했다고 해봅시다.  이는 CPU 1 의 X 로의 스토어가 CPU 2 의 X 로부터의
+로드를 앞서고 CPU 2 의 Y 로의 스토어가 CPU 3 의 Y 로부터의 로드를 앞섬을
+의미합니다.  또한, 여기서의 메모리 배리어들은 CPU 2 가 자신의 로드를 자신의
+스토어 전에 수행하고, CPU 3 가 Y 로부터의 로드를 X 로부터의 로드 전에 수행함을
+보장합니다.  그럼 "CPU 3 의 X 로부터의 로드는 0 을 리턴할 수 있을까요?"
+
+CPU 3 의 X 로드가 CPU 2 의 로드보다 뒤에 이루어졌으므로, CPU 3 의 X 로부터의
+로드는 1 을 리턴한다고 예상하는게 당연합니다.  이런 예상은 multicopy
+원자성으로부터 나옵니다: CPU B 에서 수행된 로드가 CPU A 의 같은 변수로부터의
+로드를 뒤따른다면 (그리고 CPU A 가 자신이 읽은 값으로 먼저 해당 변수에 스토어
+하지 않았다면) multicopy 원자성을 제공하는 시스템에서는, CPU B 의 로드가 CPU A
+의 로드와 같은 값 또는 그 나중 값을 리턴해야만 합니다.  하지만, 리눅스 커널은
+시스템들이 multicopy 원자성을 제공할 것을 요구하지 않습니다.
+
+앞의 범용 메모리 배리어의 사용은 모든 multicopy 원자성의 부족을 보상해줍니다.
+앞의 예에서, CPU 2 의 X 로부터의 로드가 1 을 리턴했고 CPU 3 의 Y 로부터의
+로드가 1 을 리턴했다면, CPU 3 의 X 로부터의 로드는 1을 리턴해야만 합니다.
+
+하지만, 의존성, 읽기 배리어, 쓰기 배리어는 항상 non-multicopy 원자성을 보상해
+주지는 않습니다.  예를 들어, CPU 2 의 범용 배리어가 앞의 예에서 사라져서
+아래처럼 데이터 의존성만 남게 되었다고 해봅시다:
 
        CPU 1                   CPU 2                   CPU 3
        ======================= ======================= =======================
                { X = 0, Y = 0 }
-       STORE X=1               LOAD X                  STORE Y=1
-                               <읽기 배리어>              <범용 배리어>
-                               LOAD Y                  LOAD X
-
-이 코드는 이행성을 갖지 않습니다: 이 예에서는, CPU 2 의 X 로드가 1을
-리턴하고, Y 로드는 0을 리턴하지만 CPU 3 의 X 로드가 0을 리턴하는 것도 완전히
-합법적입니다.
-
-CPU 2 의 읽기 배리어가 자신의 읽기는 순서를 맞춰줘도, CPU 1 의 스토어와의
-순서를 맞춰준다고는 보장할 수 없다는게 핵심입니다.  따라서, CPU 1 과 CPU 2 가
-버퍼나 캐시를 공유하는 시스템에서 이 예제 코드가 실행된다면, CPU 2 는 CPU 1 이
-쓴 값에 좀 빨리 접근할 수 있을 것입니다.  따라서 CPU 1 과 CPU 2 의 접근으로
-조합된 순서를 모든 CPU 가 동의할 수 있도록 하기 위해 범용 배리어가 필요합니다.
-
-범용 배리어는 "글로벌 이행성"을 제공해서, 모든 CPU 들이 오퍼레이션들의 순서에
-동의하게 할 것입니다.  반면, release-acquire 조합은 "로컬 이행성" 만을
-제공해서, 해당 조합이 사용된 CPU 들만이 해당 액세스들의 조합된 순서에 동의함이
-보장됩니다.  예를 들어, 존경스런 Herman Hollerith 의 C 코드로 보면:
+       STORE X=1               r1=LOAD X (reads 1)     LOAD Y (reads 1)
+                               <데이터 의존성>           <읽기 배리어>
+                               STORE Y=r1              LOAD X (reads 0)
+
+이 변화는 non-multicopy 원자성이 만연하게 합니다: 이 예에서, CPU 2 의 X
+로부터의 로드가 1을 리턴하고, CPU 3 의 Y 로부터의 로드가 1 을 리턴하는데, CPU 3
+의 X 로부터의 로드가 0 을 리턴하는게 완전히 합법적입니다.
+
+핵심은, CPU 2 의 데이터 의존성이 자신의 로드와 스토어를 순서짓지만, CPU 1 의
+스토어에 대한 순서는 보장하지 않는다는 것입니다.  따라서, 이 예제가 CPU 1 과
+CPU 2 가 스토어 버퍼나 한 수준의 캐시를 공유하는, multicopy 원자성을 제공하지
+않는 시스템에서 수행된다면 CPU 2 는 CPU 1 의 쓰기에 이른 접근을 할 수도
+있습니다.  따라서, 모든 CPU 들이 여러 접근들의 조합된 순서에 대해서 동의하게
+하기 위해서는 범용 배리어가 필요합니다.
+
+범용 배리어는 non-multicopy 원자성만 보상할 수 있는게 아니라, -모든- CPU 들이
+-모든- 오퍼레이션들의 순서를 동일하게 인식하게 하는 추가적인 순서 보장을
+만들어냅니다.  반대로, release-acquire 짝의 연결은 이런 추가적인 순서는
+제공하지 않는데, 해당 연결에 들어있는 CPU 들만이 메모리 접근의 조합된 순서에
+대해 동의할 것으로 보장됨을 의미합니다.  예를 들어, 존경스런 Herman Hollerith
+의 코드를 C 코드로 변환하면:
 
        int u, v, x, y, z;
 
@@ -1444,8 +1439,7 @@ CPU 2 의 읽기 배리어가 자신의 읽기는 순서를 맞춰줘도, CPU 1
        }
 
 cpu0(), cpu1(), 그리고 cpu2() 는 smp_store_release()/smp_load_acquire() 쌍의
-연결을 통한 로컬 이행성에 동참하고 있으므로, 다음과 같은 결과는 나오지 않을
-겁니다:
+연결에 참여되어 있으므로, 다음과 같은 결과는 나오지 않을 겁니다:
 
        r0 == 1 && r1 == 1 && r2 == 1
 
@@ -1454,8 +1448,9 @@ cpu0() 의 쓰기를 봐야만 하므로, 다음과 같은 결과도 없을 겁
 
        r1 == 1 && r5 == 0
 
-하지만, release-acquire 타동성은 동참한 CPU 들에만 적용되므로 cpu3() 에는
-적용되지 않습니다.  따라서, 다음과 같은 결과가 가능합니다:
+하지만, release-acquire 에 의해 제공되는 순서는 해당 연결에 동참한 CPU 들에만
+적용되므로 cpu3() 에, 적어도 스토어들 외에는 적용되지 않습니다.  따라서, 다음과
+같은 결과가 가능합니다:
 
        r0 == 0 && r1 == 1 && r2 == 1 && r3 == 0 && r4 == 0
 
@@ -1482,8 +1477,8 @@ u 로의 스토어를 cpu1() 의 v 로부터의 로드 뒤에 일어난 것으
 이런 결과는 어떤 것도 재배치 되지 않는, 순차적 일관성을 가진 가상의
 시스템에서도 일어날 수 있음을 기억해 두시기 바랍니다.
 
-다시 말하지만, 당신의 코드가 글로벌 이행성을 필요로 한다면, 범용 배리어를
-사용하십시오.
+다시 말하지만, 당신의 코드가 모든 오퍼레이션들의 완전한 순서를 필요로 한다면,
+범용 배리어를 사용하십시오.
 
 
 ==================
@@ -3046,6 +3041,9 @@ AMD64 Architecture Programmer's Manual Volume 2: System Programming
        Chapter 7.1: Memory-Access Ordering
        Chapter 7.4: Buffering and Combining Memory Writes
 
+ARM Architecture Reference Manual (ARMv8, for ARMv8-A architecture profile)
+       Chapter B2: The AArch64 Application Level Memory Model
+
 IA-32 Intel Architecture Software Developer's Manual, Volume 3:
 System Programming Guide
        Chapter 7.1: Locked Atomic Operations
@@ -3057,6 +3055,8 @@ The SPARC Architecture Manual, Version 9
        Appendix D: Formal Specification of the Memory Models
        Appendix J: Programming with the Memory Models
 
+Storage in the PowerPC (Stone and Fitzgerald)
+
 UltraSPARC Programmer Reference Manual
        Chapter 5: Memory Accesses and Cacheability
        Chapter 15: Sparc-V9 Memory Models
index 8d5830eab26a6ba03c310719eb2cc6b76e64fa5a..4f0c9fc403656d2956fc70334d7d2a0151215f92 100644 (file)
@@ -64,6 +64,8 @@ Groups:
     -EINVAL: Inconsistent restored data
     -EFAULT: Invalid guest ram access
     -EBUSY:  One or more VCPUS are running
+    -EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the
+            state is not available
 
   KVM_DEV_ARM_VGIC_GRP_ITS_REGS
   Attributes:
index fa46dcb347bc1d2ac60901c4621bd3bad81de601..ecb0d2dadfb769a83b2a3a3f4a20ce03df0aea79 100644 (file)
@@ -1,5 +1,10 @@
-Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
-which will be found on future Intel CPUs.
+Memory Protection Keys for Userspace (PKU aka PKEYs) is a feature
+which is found on Intel's Skylake "Scalable Processor" Server CPUs.
+It will be available in future non-server parts.
+
+For anyone wishing to test or use this feature, it is available in
+Amazon's EC2 C5 instances and is known to work there using an Ubuntu
+17.04 image.
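+
+For a quick smoke test, a minimal hypothetical userspace sketch, assuming a
+glibc new enough to provide the pkey_alloc()/pkey_mprotect() wrappers, could
+look like::
+
+       #define _GNU_SOURCE
+       #include <sys/mman.h>
+       #include <stdio.h>
+
+       int main(void)
+       {
+               /* One anonymous page, initially readable and writable */
+               char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+               if (p == MAP_FAILED)
+                       return 1;
+
+               /* Allocate a key that denies writes through this key */
+               int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
+               if (pkey < 0) {
+                       perror("pkey_alloc");  /* no CPU/kernel support */
+                       return 1;
+               }
+
+               /* Associate the page with the key; reads still work */
+               pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);
+               printf("first byte: %d\n", p[0]);
+
+               pkey_free(pkey);
+               return 0;
+       }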
 
 Memory Protection Keys provides a mechanism for enforcing page-based
 protections, but without requiring modification of the page tables
diff --git a/Kbuild b/Kbuild
index af161aa1facd194baeb574e9d36a530274411702..00530420548225a8b26a36f504d9aa00468ddb42 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -18,7 +18,6 @@ targets := kernel/bounds.s
 
 # We use internal kbuild rules to avoid the "is up to date" message from make
 kernel/bounds.s: kernel/bounds.c FORCE
-       $(Q)mkdir -p $(dir $@)
        $(call if_changed_dep,cc_s_c)
 
 $(obj)/$(bounds-file): kernel/bounds.s FORCE
@@ -54,7 +53,6 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 # We use internal kbuild rules to avoid the "is up to date" message from make
 arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
                                       $(obj)/$(timeconst-file) $(obj)/$(bounds-file) FORCE
-       $(Q)mkdir -p $(dir $@)
        $(call if_changed_dep,cc_s_c)
 
 $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
index bcab816b25f49ae0a6d721da76453606bbe585d3..77d819b458a99fb58c1f2f49b24b40c4681ef77a 100644 (file)
@@ -384,6 +384,7 @@ ACPI WMI DRIVER
 L:     platform-driver-x86@vger.kernel.org
 S:     Orphan
 F:     drivers/platform/x86/wmi.c
+F:     include/uapi/linux/wmi.h
 
 AD1889 ALSA SOUND DRIVER
 M:     Thibaut Varene <T-Bone@parisc-linux.org>
@@ -1589,10 +1590,13 @@ F:      drivers/rtc/rtc-armada38x.c
 
 ARM/Mediatek RTC DRIVER
 M:     Eddie Huang <eddie.huang@mediatek.com>
+M:     Sean Wang <sean.wang@mediatek.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
+F:     Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
 F:     drivers/rtc/rtc-mt6397.c
+F:     drivers/rtc/rtc-mt7622.c
 
 ARM/Mediatek SoC support
 M:     Matthias Brugger <matthias.bgg@gmail.com>
@@ -4030,6 +4034,26 @@ M:       "Maciej W. Rozycki" <macro@linux-mips.org>
 S:     Maintained
 F:     drivers/net/fddi/defxx.*
 
+DELL SMBIOS DRIVER
+M:     Pali Rohár <pali.rohar@gmail.com>
+M:     Mario Limonciello <mario.limonciello@dell.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/dell-smbios.*
+
+DELL SMBIOS SMM DRIVER
+M:     Mario Limonciello <mario.limonciello@dell.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/dell-smbios-smm.c
+
+DELL SMBIOS WMI DRIVER
+M:     Mario Limonciello <mario.limonciello@dell.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/dell-smbios-wmi.c
+F:     tools/wmi/dell-smbios-example.c
+
 DELL LAPTOP DRIVER
 M:     Matthew Garrett <mjg59@srcf.ucam.org>
 M:     Pali Rohár <pali.rohar@gmail.com>
@@ -4059,12 +4083,17 @@ S:      Maintained
 F:     Documentation/dcdbas.txt
 F:     drivers/firmware/dcdbas.*
 
-DELL WMI EXTRAS DRIVER
+DELL WMI NOTIFICATIONS DRIVER
 M:     Matthew Garrett <mjg59@srcf.ucam.org>
 M:     Pali Rohár <pali.rohar@gmail.com>
 S:     Maintained
 F:     drivers/platform/x86/dell-wmi.c
 
+DELL WMI DESCRIPTOR DRIVER
+M:     Mario Limonciello <mario.limonciello@dell.com>
+S:     Maintained
+F:     drivers/platform/x86/dell-wmi-descriptor.c
+
 DELTA ST MEDIA DRIVER
 M:     Hugues Fruchet <hugues.fruchet@st.com>
 L:     linux-media@vger.kernel.org
@@ -7181,6 +7210,11 @@ F:       Documentation/wimax/README.i2400m
 F:     drivers/net/wimax/i2400m/
 F:     include/uapi/linux/wimax/i2400m.h
 
+INTEL WMI THUNDERBOLT FORCE POWER DRIVER
+M:     Mario Limonciello <mario.limonciello@dell.com>
+S:     Maintained
+F:     drivers/platform/x86/intel-wmi-thunderbolt.c
+
 INTEL(R) TRACE HUB
 M:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
 S:     Supported
@@ -7442,7 +7476,7 @@ JFS FILESYSTEM
 M:     Dave Kleikamp <shaggy@kernel.org>
 L:     jfs-discussion@lists.sourceforge.net
 W:     http://jfs.sourceforge.net/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
+T:     git git://github.com/kleikamp/linux-shaggy.git
 S:     Maintained
 F:     Documentation/filesystems/jfs.txt
 F:     fs/jfs/
@@ -9297,9 +9331,9 @@ F:        drivers/gpu/drm/mxsfb/
 F:     Documentation/devicetree/bindings/display/mxsfb-drm.txt
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:     Hyong-Youb Kim <hykim@myri.com>
+M:     Chris Lee <christopher.lee@cspi.com>
 L:     netdev@vger.kernel.org
-W:     https://www.myricom.com/support/downloads/myri10ge.html
+W:     https://www.cspi.com/ethernet-products/support/downloads/
 S:     Supported
 F:     drivers/net/ethernet/myricom/myri10ge/
 
@@ -9695,12 +9729,11 @@ S:      Supported
 F:     drivers/ntb/hw/idt/
 
 NTB INTEL DRIVER
-M:     Jon Mason <jdmason@kudzu.us>
 M:     Dave Jiang <dave.jiang@intel.com>
 L:     linux-ntb@googlegroups.com
 S:     Supported
-W:     https://github.com/jonmason/ntb/wiki
-T:     git git://github.com/jonmason/ntb.git
+W:     https://github.com/davejiang/linux/wiki
+T:     git https://github.com/davejiang/linux.git
 F:     drivers/ntb/hw/intel/
 
 NTFS FILESYSTEM
@@ -10412,6 +10445,8 @@ F:      Documentation/switchtec.txt
 F:     Documentation/ABI/testing/sysfs-class-switchtec
 F:     drivers/pci/switch/switchtec*
 F:     include/uapi/linux/switchtec_ioctl.h
+F:     include/linux/switchtec.h
+F:     drivers/ntb/hw/mscc/
 
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
@@ -10630,6 +10665,12 @@ S:     Maintained
 F:     crypto/pcrypt.c
 F:     include/crypto/pcrypt.h
 
+PEAQ WMI HOTKEYS DRIVER
+M:     Hans de Goede <hdegoede@redhat.com>
+L:     platform-driver-x86@vger.kernel.org
+S:     Maintained
+F:     drivers/platform/x86/peaq-wmi.c
+
 PER-CPU MEMORY ALLOCATOR
 M:     Tejun Heo <tj@kernel.org>
 M:     Christoph Lameter <cl@linux.com>
@@ -11543,6 +11584,7 @@ F:      include/linux/rpmsg/
 RENESAS CLOCK DRIVERS
 M:     Geert Uytterhoeven <geert+renesas@glider.be>
 L:     linux-renesas-soc@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git clk-renesas
 S:     Supported
 F:     drivers/clk/renesas/
 
index 763ab35df12acd7258f1cc5795c7dece5ece25a7..f761bf475ba5249292af4b086770497199e0b757 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
-PATCHLEVEL = 14
+PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -11,6 +11,10 @@ NAME = Fearless Coyote
 # Comments in this file are targeted only to the developer, do not
 # expect to learn how to build the kernel reading this file.
 
+# That's our default target when none is given on the command line
+PHONY := _all
+_all:
+
 # o Do not use make's built-in rules and variables
 #   (this increases performance and avoids hard-to-debug behaviour);
 # o Look for make include files relative to root of kernel src
@@ -117,10 +121,6 @@ ifeq ("$(origin O)", "command line")
   KBUILD_OUTPUT := $(O)
 endif
 
-# That's our default target when none is given on the command line
-PHONY := _all
-_all:
-
 # Cancel implicit rules on top Makefile
 $(CURDIR)/Makefile Makefile: ;
 
@@ -132,7 +132,7 @@ ifneq ($(KBUILD_OUTPUT),)
 # check that the output directory actually exists
 saved-output := $(KBUILD_OUTPUT)
 KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
-                                                               && /bin/pwd)
+                                                               && pwd)
 $(if $(KBUILD_OUTPUT),, \
      $(error failed to create output directory "$(saved-output)"))
 
@@ -187,15 +187,6 @@ ifeq ("$(origin M)", "command line")
   KBUILD_EXTMOD := $(M)
 endif
 
-# If building an external module we do not care about the all: rule
-# but instead _all depend on modules
-PHONY += all
-ifeq ($(KBUILD_EXTMOD),)
-_all: all
-else
-_all: modules
-endif
-
 ifeq ($(KBUILD_SRC),)
         # building in the source tree
         srctree := .
@@ -207,6 +198,9 @@ else
                 srctree := $(KBUILD_SRC)
         endif
 endif
+
+export KBUILD_CHECKSRC KBUILD_EXTMOD KBUILD_SRC
+
 objtree                := .
 src            := $(srctree)
 obj            := $(objtree)
@@ -215,6 +209,74 @@ VPATH              := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD))
 
 export srctree objtree VPATH
 
+# To make sure we do not include .config for any of the *config targets
+# catch them early, and hand them over to scripts/kconfig/Makefile
+# It is allowed to specify more targets when calling make, including
+# mixing *config targets and build targets.
+# For example 'make oldconfig all'.
+# Detect when mixed targets is specified, and make a second invocation
+# of make so .config is not included in this case either (for *config).
+
+version_h := include/generated/uapi/linux/version.h
+old_version_h := include/linux/version.h
+
+no-dot-config-targets := clean mrproper distclean \
+                        cscope gtags TAGS tags help% %docs check% coccicheck \
+                        $(version_h) headers_% archheaders archscripts \
+                        kernelversion %src-pkg
+
+config-targets := 0
+mixed-targets  := 0
+dot-config     := 1
+
+ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
+       ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
+               dot-config := 0
+       endif
+endif
+
+ifeq ($(KBUILD_EXTMOD),)
+        ifneq ($(filter config %config,$(MAKECMDGOALS)),)
+                config-targets := 1
+                ifneq ($(words $(MAKECMDGOALS)),1)
+                        mixed-targets := 1
+                endif
+        endif
+endif
+# install and modules_install need also be processed one by one
+ifneq ($(filter install,$(MAKECMDGOALS)),)
+        ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
+               mixed-targets := 1
+        endif
+endif
+
+ifeq ($(mixed-targets),1)
+# ===========================================================================
+# We're called with mixed targets (*config and build targets).
+# Handle them one by one.
+
+PHONY += $(MAKECMDGOALS) __build_one_by_one
+
+$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one
+       @:
+
+__build_one_by_one:
+       $(Q)set -e; \
+       for i in $(MAKECMDGOALS); do \
+               $(MAKE) -f $(srctree)/Makefile $$i; \
+       done
+
+else
+
+# We need some generic definitions (do not try to remake the file).
+scripts/Kbuild.include: ;
+include scripts/Kbuild.include
+
+# Read KERNELRELEASE from include/config/kernel.release (if it exists)
+KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
+KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
+export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
+
 # SUBARCH tells the usermode build what the underlying arch is.  That is set
 # first, and if a usermode build is happening, the "ARCH=um" on the command
 # line overrides the setting of ARCH below.  If a native build is happening,
@@ -285,9 +347,6 @@ ifeq ($(ARCH),tilegx)
        SRCARCH := tile
 endif
 
-# Where to locate arch specific headers
-hdr-arch  := $(SRCARCH)
-
 KCONFIG_CONFIG ?= .config
 export KCONFIG_CONFIG
 
@@ -308,45 +367,6 @@ HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS)
 HOSTLDFLAGS  := $(HOST_LFS_LDFLAGS)
 HOST_LOADLIBES := $(HOST_LFS_LIBS)
 
-ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
-HOSTCFLAGS  += -Wno-unused-value -Wno-unused-parameter \
-               -Wno-missing-field-initializers -fno-delete-null-pointer-checks
-endif
-
-# Decide whether to build built-in, modular, or both.
-# Normally, just do built-in.
-
-KBUILD_MODULES :=
-KBUILD_BUILTIN := 1
-
-# If we have only "make modules", don't compile built-in objects.
-# When we're building modules with modversions, we need to consider
-# the built-in objects during the descend as well, in order to
-# make sure the checksums are up to date before we record them.
-
-ifeq ($(MAKECMDGOALS),modules)
-  KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
-endif
-
-# If we have "make <whatever> modules", compile modules
-# in addition to whatever we do anyway.
-# Just "make" or "make all" shall build modules as well
-
-ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
-  KBUILD_MODULES := 1
-endif
-
-ifeq ($(MAKECMDGOALS),)
-  KBUILD_MODULES := 1
-endif
-
-export KBUILD_MODULES KBUILD_BUILTIN
-export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
-
-# We need some generic definitions (do not try to remake the file).
-scripts/Kbuild.include: ;
-include scripts/Kbuild.include
-
 # Make variables (CC, etc...)
 AS             = $(CROSS_COMPILE)as
 LD             = $(CROSS_COMPILE)ld
@@ -374,14 +394,11 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL  =
 AFLAGS_KERNEL  =
 LDFLAGS_vmlinux =
-CFLAGS_GCOV    := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
-CFLAGS_KCOV    := $(call cc-option,-fsanitize-coverage=trace-pc,)
-
 
 # Use USERINCLUDE when you must reference the UAPI directories only.
 USERINCLUDE    := \
-               -I$(srctree)/arch/$(hdr-arch)/include/uapi \
-               -I$(objtree)/arch/$(hdr-arch)/include/generated/uapi \
+               -I$(srctree)/arch/$(SRCARCH)/include/uapi \
+               -I$(objtree)/arch/$(SRCARCH)/include/generated/uapi \
                -I$(srctree)/include/uapi \
                -I$(objtree)/include/generated/uapi \
                 -include $(srctree)/include/linux/kconfig.h
@@ -389,40 +406,33 @@ USERINCLUDE    := \
 # Use LINUXINCLUDE when you must reference the include/ directory.
 # Needed to be compatible with the O= option
 LINUXINCLUDE    := \
-               -I$(srctree)/arch/$(hdr-arch)/include \
-               -I$(objtree)/arch/$(hdr-arch)/include/generated \
+               -I$(srctree)/arch/$(SRCARCH)/include \
+               -I$(objtree)/arch/$(SRCARCH)/include/generated \
                $(if $(KBUILD_SRC), -I$(srctree)/include) \
                -I$(objtree)/include \
                $(USERINCLUDE)
 
-KBUILD_CPPFLAGS := -D__KERNEL__
-
+KBUILD_AFLAGS   := -D__ASSEMBLY__
 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                   -fno-strict-aliasing -fno-common -fshort-wchar \
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
-                  -std=gnu89 $(call cc-option,-fno-PIE)
-
-
+                  -std=gnu89
+KBUILD_CPPFLAGS := -D__KERNEL__
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+GCC_PLUGINS_CFLAGS :=
 
-# Read KERNELRELEASE from include/config/kernel.release (if it exists)
-KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
-KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
-
-export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
 export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
 export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES
 export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -464,73 +474,38 @@ ifneq ($(KBUILD_SRC),)
            $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
 endif
 
-# Support for using generic headers in asm-generic
-PHONY += asm-generic uapi-asm-generic
-asm-generic: uapi-asm-generic
-       $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
-                   src=asm obj=arch/$(SRCARCH)/include/generated/asm
-uapi-asm-generic:
-       $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
-                   src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm
-
-# To make sure we do not include .config for any of the *config targets
-# catch them early, and hand them over to scripts/kconfig/Makefile
-# It is allowed to specify more targets when calling make, including
-# mixing *config targets and build targets.
-# For example 'make oldconfig all'.
-# Detect when mixed targets is specified, and make a second invocation
-# of make so .config is not included in this case either (for *config).
-
-version_h := include/generated/uapi/linux/version.h
-old_version_h := include/linux/version.h
-
-no-dot-config-targets := clean mrproper distclean \
-                        cscope gtags TAGS tags help% %docs check% coccicheck \
-                        $(version_h) headers_% archheaders archscripts \
-                        kernelversion %src-pkg
-
-config-targets := 0
-mixed-targets  := 0
-dot-config     := 1
-
-ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
-       ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
-               dot-config := 0
-       endif
-endif
-
-ifeq ($(KBUILD_EXTMOD),)
-        ifneq ($(filter config %config,$(MAKECMDGOALS)),)
-                config-targets := 1
-                ifneq ($(words $(MAKECMDGOALS)),1)
-                        mixed-targets := 1
-                endif
-        endif
+ifeq ($(cc-name),clang)
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET   := --target=$(notdir $(CROSS_COMPILE:%-=%))
+GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
 endif
-# install and modules_install need also be processed one by one
-ifneq ($(filter install,$(MAKECMDGOALS)),)
-        ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
-               mixed-targets := 1
-        endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_GCC_TC   := --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+else
 
-ifeq ($(mixed-targets),1)
-# ===========================================================================
-# We're called with mixed targets (*config and build targets).
-# Handle them one by one.
-
-PHONY += $(MAKECMDGOALS) __build_one_by_one
-
-$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one
-       @:
-
-__build_one_by_one:
-       $(Q)set -e; \
-       for i in $(MAKECMDGOALS); do \
-               $(MAKE) -f $(srctree)/Makefile $$i; \
-       done
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+endif
 
-else
 ifeq ($(config-targets),1)
 # ===========================================================================
 # *config targets only - make sure prerequisites are updated, and descend
@@ -553,6 +528,44 @@ else
 # Build targets only - this includes vmlinux, arch specific targets, clean
 # targets and others. In general all targets except *config targets.
 
+# If building an external module we do not care about the all: rule
+# but instead _all depend on modules
+PHONY += all
+ifeq ($(KBUILD_EXTMOD),)
+_all: all
+else
+_all: modules
+endif
+
+# Decide whether to build built-in, modular, or both.
+# Normally, just do built-in.
+
+KBUILD_MODULES :=
+KBUILD_BUILTIN := 1
+
+# If we have only "make modules", don't compile built-in objects.
+# When we're building modules with modversions, we need to consider
+# the built-in objects during the descend as well, in order to
+# make sure the checksums are up to date before we record them.
+
+ifeq ($(MAKECMDGOALS),modules)
+  KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
+endif
+
+# If we have "make <whatever> modules", compile modules
+# in addition to whatever we do anyway.
+# Just "make" or "make all" shall build modules as well
+
+ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
+  KBUILD_MODULES := 1
+endif
+
+ifeq ($(MAKECMDGOALS),)
+  KBUILD_MODULES := 1
+endif
+
+export KBUILD_MODULES KBUILD_BUILTIN
+
 ifeq ($(KBUILD_EXTMOD),)
 # Additional helpers built in scripts/
 # Carefully list dependencies so we do not try to build scripts twice
@@ -623,6 +636,11 @@ endif
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+KBUILD_CFLAGS  += $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS  += $(call cc-option,-fno-PIE)
+CFLAGS_GCOV    := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
+export CFLAGS_GCOV CFLAGS_KCOV
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
@@ -654,11 +672,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
        KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
        KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
+include scripts/Makefile.kcov
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
@@ -697,38 +716,6 @@ ifdef CONFIG_CC_STACKPROTECTOR
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
-ifeq ($(cc-name),clang)
-ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET   := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
-endif
-ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC   := --gcc-toolchain=$(GCC_TOOLCHAIN)
-endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
-# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
-else
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
-endif
-
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
@@ -790,7 +777,7 @@ KBUILD_CFLAGS       += $(call cc-option,-fdata-sections,)
 endif
 
 # arch Makefile may override CC so keep this after arch Makefile is included
-NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
+NOSTDINC_FLAGS += -nostdinc -isystem $(call shell-cached,$(CC) -print-file-name=include)
 CHECKFLAGS     += $(NOSTDINC_FLAGS)
 
 # warn about C99 declaration after statement
@@ -1022,7 +1009,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 
 PHONY += $(vmlinux-dirs)
 $(vmlinux-dirs): prepare scripts
-       $(Q)$(MAKE) $(build)=$@
+       $(Q)$(MAKE) $(build)=$@ need-builtin=1
 
 define filechk_kernel.release
        echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
@@ -1072,6 +1059,15 @@ prepare0: archprepare gcc-plugins
 # All the preparing..
 prepare: prepare0 prepare-objtool
 
+# Support for using generic headers in asm-generic
+PHONY += asm-generic uapi-asm-generic
+asm-generic: uapi-asm-generic
+       $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+                   src=asm obj=arch/$(SRCARCH)/include/generated/asm
+uapi-asm-generic:
+       $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+                   src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm
+
 PHONY += prepare-objtool
 prepare-objtool: $(objtool_target)
 
@@ -1140,8 +1136,8 @@ headerdep:
 #Default location for installed headers
 export INSTALL_HDR_PATH = $(objtree)/usr
 
-# If we do an all arch process set dst to include/arch-$(hdr-arch)
-hdr-dst = $(if $(KBUILD_HEADERS), dst=include/arch-$(hdr-arch), dst=include)
+# If we do an all arch process set dst to include/arch-$(SRCARCH)
+hdr-dst = $(if $(KBUILD_HEADERS), dst=include/arch-$(SRCARCH), dst=include)
 
 PHONY += archheaders
 archheaders:
@@ -1159,10 +1155,10 @@ headers_install_all:
 
 PHONY += headers_install
 headers_install: __headers
-       $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
+       $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
          $(error Headers not exportable for the $(SRCARCH) architecture))
        $(Q)$(MAKE) $(hdr-inst)=include/uapi dst=include
-       $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst)
+       $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi $(hdr-dst)
 
 PHONY += headers_check_all
 headers_check_all: headers_install_all
@@ -1171,7 +1167,7 @@ headers_check_all: headers_install_all
 PHONY += headers_check
 headers_check: headers_install
        $(Q)$(MAKE) $(hdr-inst)=include/uapi dst=include HDRCHECK=1
-       $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst) HDRCHECK=1
+       $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi $(hdr-dst) HDRCHECK=1
 
 # ---------------------------------------------------------------------------
 # Kernel selftest
@@ -1284,7 +1280,7 @@ CLEAN_DIRS  += $(MODVERDIR)
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS  += include/config usr/include include/generated          \
                  arch/*/include/generated .tmp_objdiff
-MRPROPER_FILES += .config .config.old .version .old_version \
+MRPROPER_FILES += .config .config.old .version \
                  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
                  signing_key.pem signing_key.priv signing_key.x509     \
                  x509.genkey extra_certificates signing_key.x509.keyid \
@@ -1341,8 +1337,9 @@ package-dir       := scripts/package
        $(Q)$(MAKE) $(build)=$(package-dir) $@
 %pkg: include/config/kernel.release FORCE
        $(Q)$(MAKE) $(build)=$(package-dir) $@
-rpm: include/config/kernel.release FORCE
-       $(Q)$(MAKE) $(build)=$(package-dir) $@
+rpm: rpm-pkg
+       @echo "  WARNING: \"rpm\" target will be removed after Linux 4.18"
+       @echo "           Please use \"rpm-pkg\" instead."
 
 
 # Brief documentation of the typical targets used
@@ -1394,7 +1391,7 @@ help:
        @echo  '  export_report   - List the usages of all exported symbols'
        @echo  '  headers_check   - Sanity check on exported headers'
        @echo  '  headerdep       - Detect inclusion cycles in headers'
-       @$(MAKE) -f $(srctree)/scripts/Makefile.help checker-help
+       @echo  '  coccicheck      - Check with Coccinelle'
        @echo  ''
        @echo  'Kernel selftest:'
        @echo  '  kselftest       - Build and run kernel selftest (run as root)'
@@ -1550,13 +1547,14 @@ clean: $(clean-dirs)
        $(call cmd,rmdirs)
        $(call cmd,rmfiles)
        @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
-               \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+               \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
                -o -name '*.ko.*' -o -name '*.dtb' -o -name '*.dtb.S' \
-               -o -name '*.dwo'  \
+               -o -name '*.dwo' -o -name '*.lst' \
                -o -name '*.su'  \
                -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
                -o -name '*.symtypes' -o -name 'modules.order' \
                -o -name modules.builtin -o -name '.tmp_*.o.*' \
+               -o -name .cache.mk \
                -o -name '*.c.[012]*.*' \
                -o -name '*.ll' \
                -o -name '*.gcno' \) -type f -print | xargs rm -f
@@ -1703,8 +1701,7 @@ cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
 
 # read all saved command lines
 
-targets := $(wildcard $(sort $(targets)))
-cmd_files := $(wildcard .*.cmd $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
+cmd_files := $(wildcard .*.cmd $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd))
 
 ifneq ($(cmd_files),)
   $(cmd_files): ;      # Do not try to update included dependency files
index 5da0aec8ce904d1bacc7a4811bcb5b17f3276fcf..438b10c44d732355888e856668fc09c5a9685e85 100644 (file)
@@ -65,9 +65,9 @@ srmcons_do_receive_chars(struct tty_port *port)
 }
 
 static void
-srmcons_receive_chars(unsigned long data)
+srmcons_receive_chars(struct timer_list *t)
 {
-       struct srmcons_private *srmconsp = (struct srmcons_private *)data;
+       struct srmcons_private *srmconsp = from_timer(srmconsp, t, timer);
        struct tty_port *port = &srmconsp->port;
        unsigned long flags;
        int incr = 10;
@@ -206,8 +206,7 @@ static const struct tty_operations srmcons_ops = {
 static int __init
 srmcons_init(void)
 {
-       setup_timer(&srmcons_singleton.timer, srmcons_receive_chars,
-                       (unsigned long)&srmcons_singleton);
+       timer_setup(&srmcons_singleton.timer, srmcons_receive_chars, 0);
        if (srm_is_registered_console) {
                struct tty_driver *driver;
                int err;
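The conversion pattern applied above, shown as a stripped-down sketch (the structure and field names below are invented for illustration, not the srmcons ones)::

        #include <linux/timer.h>

        struct my_dev {
                struct timer_list timer;
                int pending;
        };

        /* New-style callback: receives the timer, not an opaque cookie */
        static void my_dev_timeout(struct timer_list *t)
        {
                struct my_dev *dev = from_timer(dev, t, timer);

                dev->pending = 0;
        }

        static void my_dev_init(struct my_dev *dev)
        {
                /* Replaces setup_timer(&dev->timer, fn, (unsigned long)dev) */
                timer_setup(&dev->timer, my_dev_timeout, 0);
        }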
index 5c7adf100a582ba1d1a4c3d2760d1d7103e0eb0d..9d5fd00d9e91bf0caa066bc0597475723a014328 100644 (file)
@@ -39,7 +39,7 @@ config ARC
        select OF
        select OF_EARLY_FLATTREE
        select OF_RESERVED_MEM
-       select PERF_USE_VMALLOC
+       select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_KERNEL_GZIP
index e114000a84f56c9e07ddd3a2e623c4dfeb3df6a2..74d070cd3c13a723fef1a2b3cd91cd2919392762 100644 (file)
                ranges = <0x00000000 0x0 0xe0000000 0x10000000>;
                interrupt-parent = <&mb_intc>;
 
+               creg_rst: reset-controller@11220 {
+                       compatible = "snps,axs10x-reset";
+                       #reset-cells = <1>;
+                       reg = <0x11220 0x4>;
+               };
+
                i2sclk: i2sclk@100a0 {
                        compatible = "snps,axs10x-i2s-pll-clock";
                        reg = <0x100a0 0x10>;
@@ -73,6 +79,8 @@
                        clocks = <&apbclk>;
                        clock-names = "stmmaceth";
                        max-speed = <100>;
+                       resets = <&creg_rst 5>;
+                       reset-names = "stmmaceth";
                };
 
                ehci@0x40000 {
index b1c56d35f2a938e59c9677454499aeecb1da4f7e..49bfbd879caa6ffa08553e9b0f49b542739bb95b 100644 (file)
 
 /* Build Configuration Registers */
 #define ARC_REG_AUX_DCCM       0x18    /* DCCM Base Addr ARCv2 */
+#define ARC_REG_ERP_CTRL       0x3F    /* ARCv2 Error protection control */
 #define ARC_REG_DCCM_BASE_BUILD        0x61    /* DCCM Base Addr ARCompact */
 #define ARC_REG_CRC_BCR                0x62
 #define ARC_REG_VECBASE_BCR    0x68
 #define ARC_REG_PERIBASE_BCR   0x69
 #define ARC_REG_FP_BCR         0x6B    /* ARCompact: Single-Precision FPU */
 #define ARC_REG_DPFP_BCR       0x6C    /* ARCompact: Dbl Precision FPU */
+#define ARC_REG_ERP_BUILD      0xc7    /* ARCv2 Error protection Build: ECC/Parity */
 #define ARC_REG_FP_V2_BCR      0xc8    /* ARCv2 FPU */
 #define ARC_REG_SLC_BCR                0xce
 #define ARC_REG_DCCM_BUILD     0x74    /* DCCM size (common) */
 #define ARC_REG_D_UNCACH_BCR   0x6A
 #define ARC_REG_BPU_BCR                0xc0
 #define ARC_REG_ISA_CFG_BCR    0xc1
+#define ARC_REG_LPB_BUILD      0xE9    /* ARCv2 Loop Buffer Build */
 #define ARC_REG_RTT_BCR                0xF2
 #define ARC_REG_IRQ_BCR                0xF3
+#define ARC_REG_MICRO_ARCH_BCR 0xF9    /* ARCv2 Product revision */
 #define ARC_REG_SMART_BCR      0xFF
 #define ARC_REG_CLUSTER_BCR    0xcf
 #define ARC_REG_AUX_ICCM       0x208   /* ICCM Base Addr (ARCv2) */
+#define ARC_REG_LPB_CTRL       0x488   /* ARCv2 Loop Buffer control */
 
 /* Common for ARCompact and ARCv2 status register */
 #define ARC_REG_STATUS32       0x0A
@@ -229,6 +234,32 @@ struct bcr_bpu_arcv2 {
 #endif
 };
 
+/* Error Protection Build: ECC/Parity */
+struct bcr_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8;
+#else
+       unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5;
+#endif
+};
+
+/* Error Protection Control */
+struct ctl_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad2:27, mpd:1, pad1:2, dpd:1, dpi:1;
+#else
+       unsigned int dpi:1, dpd:1, pad1:2, mpd:1, pad2:27;
+#endif
+};
+
+struct bcr_lpb {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:16, entries:8, ver:8;
+#else
+       unsigned int ver:8, entries:8, pad:16;
+#endif
+};
+
 struct bcr_generic {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int info:24, ver:8;
@@ -270,7 +301,7 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
                unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
-                            fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
+                            fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
                             debug:1, ap:1, smart:1, rtt:1, pad3:4,
                             timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
index 2ce24e74f87956af0bba1d9430200196353bdff8..8aec462d90fbe8f0aa88847272d02004a863f2db 100644 (file)
@@ -336,15 +336,12 @@ static int arc_pmu_add(struct perf_event *event, int flags)
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       if (__test_and_set_bit(idx, pmu_cpu->used_mask)) {
-               idx = find_first_zero_bit(pmu_cpu->used_mask,
-                                         arc_pmu->n_counters);
-               if (idx == arc_pmu->n_counters)
-                       return -EAGAIN;
-
-               __set_bit(idx, pmu_cpu->used_mask);
-               hwc->idx = idx;
-       }
+       idx = ffz(pmu_cpu->used_mask[0]);
+       if (idx == arc_pmu->n_counters)
+               return -EAGAIN;
+
+       __set_bit(idx, pmu_cpu->used_mask);
+       hwc->idx = idx;
 
        write_aux_reg(ARC_REG_PCT_INDEX, idx);
 
@@ -377,21 +374,22 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
        struct perf_sample_data data;
        struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
        struct pt_regs *regs;
-       int active_ints;
+       unsigned int active_ints;
        int idx;
 
        arc_pmu_disable(&arc_pmu->pmu);
 
        active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
+       if (!active_ints)
+               goto done;
 
        regs = get_irq_regs();
 
-       for (idx = 0; idx < arc_pmu->n_counters; idx++) {
-               struct perf_event *event = pmu_cpu->act_counter[idx];
+       do {
+               struct perf_event *event;
                struct hw_perf_event *hwc;
 
-               if (!(active_ints & (1 << idx)))
-                       continue;
+               idx = __ffs(active_ints);
 
                /* Reset interrupt flag by writing of 1 */
                write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
@@ -404,19 +402,22 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
                write_aux_reg(ARC_REG_PCT_INT_CTRL,
                        read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
 
+               event = pmu_cpu->act_counter[idx];
                hwc = &event->hw;
 
                WARN_ON_ONCE(hwc->idx != idx);
 
                arc_perf_event_update(event, &event->hw, event->hw.idx);
                perf_sample_data_init(&data, 0, hwc->last_period);
-               if (!arc_pmu_event_set_period(event))
-                       continue;
+               if (arc_pmu_event_set_period(event)) {
+                       if (perf_event_overflow(event, &data, regs))
+                               arc_pmu_stop(event, 0);
+               }
 
-               if (perf_event_overflow(event, &data, regs))
-                       arc_pmu_stop(event, 0);
-       }
+               active_ints &= ~(1U << idx);
+       } while (active_ints);
 
+done:
        arc_pmu_enable(&arc_pmu->pmu);
 
        return IRQ_HANDLED;
@@ -461,6 +462,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
                pr_err("This core does not have performance counters!\n");
                return -ENODEV;
        }
+       BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
        BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
 
        READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
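
In the arc_pmu_add()/arc_pmu_intr() hunks above, the per-index scans are replaced by single-word bit searches: ffz() picks the first free counter out of used_mask[0], and __ffs() walks the pending-interrupt word lowest bit first, clearing each bit as it is serviced. A small user-space sketch of the same idiom (GCC builtins standing in for the kernel helpers, which likewise only operate on one unsigned long, as the code above assumes):

    #include <stdio.h>

    /* stand-ins for the kernel's ffz()/__ffs(); undefined for all-ones / zero words */
    static inline unsigned int first_zero(unsigned long w) { return __builtin_ctzl(~w); }
    static inline unsigned int first_set(unsigned long w)  { return __builtin_ctzl(w);  }

    int main(void)
    {
            unsigned long used = 0x0bUL;      /* counters 0, 1, 3 busy */
            unsigned long pending = 0x14UL;   /* counters 2 and 4 overflowed */

            printf("next free counter: %u\n", first_zero(used));   /* 2 */

            while (pending) {                 /* service lowest pending bit first */
                    unsigned int idx = first_set(pending);
                    printf("handle counter %u\n", idx);
                    pending &= ~(1UL << idx);
            }
            return 0;
    }
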
index fb83844daeea3550aacd27de2525711a6aa5fddc..7ef7d9a8ff89231811e73a241a3a3c6d248e720b 100644 (file)
@@ -199,8 +199,10 @@ static void read_arc_build_cfg_regs(void)
                        unsigned int exec_ctrl;
 
                        READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
-                       cpu->extn.dual_iss_exist = 1;
-                       cpu->extn.dual_iss_enb = exec_ctrl & 1;
+                       cpu->extn.dual_enb = exec_ctrl & 1;
+
+                       /* dual issue always present for this core */
+                       cpu->extn.dual = 1;
                }
        }
 
@@ -253,7 +255,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       cpu_id, cpu->name, cpu->details,
                       is_isa_arcompact() ? "ARCompact" : "ARCv2",
                       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
-                      IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
+                      IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
 
        n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
                       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@@ -293,11 +295,26 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 
        if (cpu->bpu.ver)
                n += scnprintf(buf + n, len - n,
-                             "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
+                             "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
                              IS_AVAIL1(cpu->bpu.full, "full"),
                              IS_AVAIL1(!cpu->bpu.full, "partial"),
                              cpu->bpu.num_cache, cpu->bpu.num_pred);
 
+       if (is_isa_arcv2()) {
+               struct bcr_lpb lpb;
+
+               READ_BCR(ARC_REG_LPB_BUILD, lpb);
+               if (lpb.ver) {
+                       unsigned int ctl;
+                       ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+
+                       n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
+                               lpb.entries,
+                               IS_DISABLED_RUN(!ctl));
+               }
+       }
+
+       n += scnprintf(buf + n, len - n, "\n");
        return buf;
 }
 
@@ -326,6 +343,24 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
                               cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
                               cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
 
+       if (is_isa_arcv2()) {
+
+               /* Error Protection: ECC/Parity */
+               struct bcr_erp erp;
+               READ_BCR(ARC_REG_ERP_BUILD, erp);
+
+               if (erp.ver) {
+                       struct  ctl_erp ctl;
+                       READ_BCR(ARC_REG_ERP_CTRL, ctl);
+
+                       /* inverted bits: 0 means enabled */
+                       n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
+                               IS_AVAIL3(erp.ic,  !ctl.dpi, "IC "),
+                               IS_AVAIL3(erp.dc,  !ctl.dpd, "DC "),
+                               IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
+               }
+       }
+
        n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
                        EF_ARC_OSABI_CURRENT >> 8,
                        EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
index 8ceefbf72fb0f8b0d1ce9ca1516bb7edd487cc9a..4097764fea23499a828a559f70a62a29daba14c8 100644 (file)
@@ -762,21 +762,23 @@ void read_decode_mmu_bcr(void)
        tmp = read_aux_reg(ARC_REG_MMU_BCR);
        mmu->ver = (tmp >> 24);
 
-       if (mmu->ver <= 2) {
-               mmu2 = (struct bcr_mmu_1_2 *)&tmp;
-               mmu->pg_sz_k = TO_KB(0x2000);
-               mmu->sets = 1 << mmu2->sets;
-               mmu->ways = 1 << mmu2->ways;
-               mmu->u_dtlb = mmu2->u_dtlb;
-               mmu->u_itlb = mmu2->u_itlb;
-       } else if (mmu->ver == 3) {
-               mmu3 = (struct bcr_mmu_3 *)&tmp;
-               mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
-               mmu->sets = 1 << mmu3->sets;
-               mmu->ways = 1 << mmu3->ways;
-               mmu->u_dtlb = mmu3->u_dtlb;
-               mmu->u_itlb = mmu3->u_itlb;
-               mmu->sasid = mmu3->sasid;
+       if (is_isa_arcompact()) {
+               if (mmu->ver <= 2) {
+                       mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+                       mmu->pg_sz_k = TO_KB(0x2000);
+                       mmu->sets = 1 << mmu2->sets;
+                       mmu->ways = 1 << mmu2->ways;
+                       mmu->u_dtlb = mmu2->u_dtlb;
+                       mmu->u_itlb = mmu2->u_itlb;
+               } else {
+                       mmu3 = (struct bcr_mmu_3 *)&tmp;
+                       mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
+                       mmu->sets = 1 << mmu3->sets;
+                       mmu->ways = 1 << mmu3->ways;
+                       mmu->u_dtlb = mmu3->u_dtlb;
+                       mmu->u_itlb = mmu3->u_itlb;
+                       mmu->sasid = mmu3->sasid;
+               }
        } else {
                mmu4 = (struct bcr_mmu_4 *)&tmp;
                mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
@@ -818,8 +820,9 @@ int pae40_exist_but_not_enab(void)
 
 void arc_mmu_init(void)
 {
-       char str[256];
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+       char str[256];
+       int compat = 0;
 
        pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
 
@@ -834,15 +837,21 @@ void arc_mmu_init(void)
         */
        BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
 
-       /* For efficiency sake, kernel is compile time built for a MMU ver
-        * This must match the hardware it is running on.
-        * Linux built for MMU V2, if run on MMU V1 will break down because V1
-        *  hardware doesn't understand cmds such as WriteNI, or IVUTLB
-        * On the other hand, Linux built for V1 if run on MMU V2 will do
-        *   un-needed workarounds to prevent memcpy thrashing.
-        * Similarly MMU V3 has new features which won't work on older MMU
+       /*
+        * Ensure that MMU features assumed by kernel exist in hardware.
+        * For older ARC700 cpus, it has to be exact match, since the MMU
+        * revisions were not backwards compatible (MMUv3 TLB layout changed
+        * so even if kernel for v2 didn't use any new cmds of v3, it would
+        * still not work.
+        * For HS cpus, MMUv4 was baseline and v5 is backwards compatible
+        * (will run older software).
         */
-       if (mmu->ver != CONFIG_ARC_MMU_VER) {
+       if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
+               compat = 1;
+       else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
+               compat = 1;
+
+       if (!compat) {
                panic("MMU ver %d doesn't match kernel built for %d...\n",
                      mmu->ver, CONFIG_ARC_MMU_VER);
        }
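
The compatibility rule spelled out in the comment above boils down to: ARCompact kernels require an exact MMU version match, while ARCv2/HS kernels accept any hardware MMU at or above the version they were built for. A tiny illustrative predicate (a sketch, not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool mmu_compat(bool is_arcompact, int hw_ver, int kernel_ver)
    {
            return is_arcompact ? (hw_ver == kernel_ver)   /* legacy MMUs: exact match only */
                                : (hw_ver >= kernel_ver);  /* HS: newer MMU runs older kernels */
    }

    int main(void)
    {
            printf("%d\n", mmu_compat(false, 5, 4));  /* 1: MMUv5 hardware, v4 kernel -> ok */
            printf("%d\n", mmu_compat(true,  3, 2));  /* 0: ARCompact needs an exact match  */
            return 0;
    }
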
index c54d1ae57fe0b3feffd4578387f11593c45308e4..4e0df7b7a248147af495948e95488a67e0b78499 100644 (file)
@@ -14,6 +14,8 @@ menuconfig ARC_PLAT_AXS10X
        select MIGHT_HAVE_PCI
        select GENERIC_IRQ_CHIP
        select GPIOLIB
+       select AXS101 if ISA_ARCOMPACT
+       select AXS103 if ISA_ARCV2
        help
          Support for the ARC AXS10x Software Development Platforms.
 
index cf14ebc36916a2a0eca39728c0cc0f315d58bbeb..f1ac6790da5fe64782b59b720bf3ea80d999bff1 100644 (file)
@@ -111,13 +111,6 @@ static void __init axs10x_early_init(void)
 
        axs10x_enable_gpio_intc_wire();
 
-       /*
-        * Reset ethernet IP core.
-        * TODO: get rid of this quirk after axs10x reset driver (or simple
-        * reset driver) will be available in upstream.
-        */
-       iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
-
        scnprintf(mb, 32, "MainBoard v%d", mb_rev);
        axs10x_print_board_ver(CREG_MB_VER, mb);
 }
index 12b8c8f8ec0708f24e5f953857044d45660d618f..17685e19aed8e4792699613eb2df95525b1aca47 100644 (file)
@@ -1776,9 +1776,9 @@ config DEBUG_UART_8250_FLOW_CONTROL
        default y if ARCH_EBSA110 || DEBUG_FOOTBRIDGE_COM1 || DEBUG_GEMINI || ARCH_RPC
 
 config DEBUG_UNCOMPRESS
-       bool
+       bool "Enable decompressor debugging via DEBUG_LL output"
        depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
-       default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
+       depends on DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
                     (!DEBUG_TEGRA_UART || !ZBOOT_ROM) && \
                     !DEBUG_BRCMSTB_UART
        help
index def8824fc71ca36f274b680443ec99a1eeab8c69..80351e505fd57aeb0413a80e61214e341a3e2f48 100644 (file)
@@ -16,11 +16,11 @@ LDFLAGS             :=
 LDFLAGS_vmlinux        :=-p --no-undefined -X --pic-veneer
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux        += --be8
-LDFLAGS_MODULE += --be8
+KBUILD_LDFLAGS_MODULE  += --be8
 endif
 
 ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
-LDFLAGS_MODULE += -T $(srctree)/arch/arm/kernel/module.lds
+KBUILD_LDFLAGS_MODULE  += -T $(srctree)/arch/arm/kernel/module.lds
 endif
 
 GZFLAGS                :=-9
@@ -122,7 +122,7 @@ CFLAGS_ISA  :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
 AFLAGS_ISA     :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
 # Work around buggy relocation from gas if requested:
 ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
-CFLAGS_MODULE  +=-fno-optimize-sibling-calls
+KBUILD_CFLAGS_MODULE   +=-fno-optimize-sibling-calls
 endif
 else
 CFLAGS_ISA     :=$(call cc-option,-marm,)
index ad301f107dd286cff4432b8cc0284ff5976eec54..bc8d4bbd82e27719a990c7972fd77bfca9dc7aef 100644 (file)
@@ -518,4 +518,22 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
 #endif
        .endm
 
+       .macro  bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1:     .inst   0xde02
+#else
+1:     .inst   0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+       .pushsection .rodata.str, "aMS", %progbits, 1
+2:     .asciz  "\msg"
+       .popsection
+       .pushsection __bug_table, "aw"
+       .align  2
+       .word   1b, 2b
+       .hword  \line
+       .popsection
+#endif
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
index 2a029bceaf2f8593788dea27ec05f3664fc9733a..1a7a17b2a1bae97a21fca6a4920efd96540ac43d 100644 (file)
@@ -221,7 +221,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 }
 #define        __HAVE_ARCH_PTE_SPECIAL
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd)         (pmd_isset((pmd), L_PMD_SECT_DIRTY))
 #define pud_page(pud)          pmd_page(__pmd(pud_val(pud)))
index 1c462381c225eea31346ec4f19145e3fd449caab..150ece66ddf34506cf8d36963c2461a8188ebe91 100644 (file)
@@ -232,6 +232,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_valid_user(pte)    \
        (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
 
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+       pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
+       pteval_t needed = mask;
+
+       if (write)
+               mask |= L_PTE_RDONLY;
+
+       return (pte_val(pte) & mask) == needed;
+}
+#define pte_access_permitted pte_access_permitted
+
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
 {
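
pte_access_permitted() above folds several checks into one comparison: every bit in mask must equal the corresponding bit in needed, so L_PTE_PRESENT and L_PTE_USER must be set, and for a write L_PTE_RDONLY is added to mask but not to needed, i.e. it must be clear. A stand-alone illustration with made-up bit values (not the real L_PTE_* constants):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EX_PRESENT (1u << 0)
    #define EX_USER    (1u << 1)
    #define EX_RDONLY  (1u << 2)

    static bool access_permitted(uint32_t pte, bool write)
    {
            uint32_t mask = EX_PRESENT | EX_USER;   /* bits that must match...        */
            uint32_t needed = mask;                 /* ...and the value they must have */

            if (write)
                    mask |= EX_RDONLY;              /* joins mask, stays 0 in needed   */

            return (pte & mask) == needed;
    }

    int main(void)
    {
            uint32_t ro_pte = EX_PRESENT | EX_USER | EX_RDONLY;

            printf("read  ok: %d\n", access_permitted(ro_pte, false));  /* 1 */
            printf("write ok: %d\n", access_permitted(ro_pte, true));   /* 0 */
            return 0;
    }
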
index d523cd8439a3df250ec514e2137ba5754c157ffa..7f4d80c2db6bf128451c8390b7f9048ca413034c 100644 (file)
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
+       tst     r1, #0xcf
+       bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
                                                @ after ldm {}^
        add     sp, sp, #\offset + PT_REGS_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
+1:     bug     "Returning to usermode but unexpected PSR bits set?", \@
 #elif defined(CONFIG_CPU_V7M)
        @ V7M restore.
        @ Note that we don't need to do clrex here as clearing the local
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [sp, #\offset + S_PC]       @ get pc
        add     sp, sp, #\offset + S_SP
+       tst     r1, #0xcf
+       bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        .endif
        add     sp, sp, #PT_REGS_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
+1:     bug     "Returning to usermode but unexpected PSR bits set?", \@
 #endif /* !CONFIG_THUMB2_KERNEL */
        .endm
 
index f24628db540984bbff0d4274bd1b312746a6347c..e2bd35b6780cd6c859758a276cea0cf3b29eada2 100644 (file)
@@ -4,6 +4,7 @@
 #
 
 source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
 
 menuconfig VIRTUALIZATION
        bool "Virtualization"
@@ -23,6 +24,8 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select ARM_GIC
+       select ARM_GIC_V3
+       select ARM_GIC_V3_ITS
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select HAVE_KVM_ARCH_TLB_FLUSH_ALL
        select KVM_MMIO
@@ -36,6 +39,8 @@ config KVM
        select HAVE_KVM_IRQCHIP
        select HAVE_KVM_IRQ_ROUTING
        select HAVE_KVM_MSI
+       select IRQ_BYPASS_MANAGER
+       select HAVE_KVM_IRQ_BYPASS
        depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
        ---help---
          Support hosting virtualized guest machines.
index f550abd64a25df1f42de16547e6fd4a1aa092787..48de846f22464637be95c64e0a1ff9357b6e5a65 100644 (file)
@@ -32,6 +32,7 @@ obj-y += $(KVM)/arm/vgic/vgic-init.o
 obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
 obj-y += $(KVM)/arm/vgic/vgic-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-v3.o
+obj-y += $(KVM)/arm/vgic/vgic-v4.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
index c1cd80ecc21992b2778c20f03f04ea9c328e0a4e..3b73813c6b0434f93c85bf4256ae70049de8ae2c 100644 (file)
@@ -305,7 +305,7 @@ static void n2100_restart(enum reboot_mode mode, const char *cmd)
 
 static struct timer_list power_button_poll_timer;
 
-static void power_button_poll(unsigned long dummy)
+static void power_button_poll(struct timer_list *unused)
 {
        if (gpio_get_value(N2100_POWER_BUTTON) == 0) {
                ctrl_alt_del();
@@ -336,8 +336,7 @@ static int __init n2100_request_gpios(void)
                        pr_err("could not set power GPIO as input\n");
        }
        /* Set up power button poll timer */
-       init_timer(&power_button_poll_timer);
-       power_button_poll_timer.function = power_button_poll;
+       timer_setup(&power_button_poll_timer, power_button_poll, 0);
        power_button_poll_timer.expires = jiffies + (HZ / 10);
        add_timer(&power_button_poll_timer);
        return 0;
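
This and many later hunks in the section are the same mechanical conversion: from init_timer()/setup_timer(), whose callback took an unsigned long cookie, to timer_setup(), whose callback receives the struct timer_list pointer and recovers its containing object with from_timer(), a container_of() wrapper. A stand-alone sketch of the pattern (simplified macro spelled out by hand; the kernel's from_timer() takes the variable rather than the type):

    #include <stddef.h>
    #include <stdio.h>

    struct timer_list { void (*function)(struct timer_list *); };

    /* simplified stand-in: recover the enclosing object from the timer pointer */
    #define from_timer(type, t, field) \
            ((type *)((char *)(t) - offsetof(type, field)))

    struct my_device {
            int id;
            struct timer_list poll_timer;
    };

    static void poll_cb(struct timer_list *t)
    {
            struct my_device *dev = from_timer(struct my_device, t, poll_timer);
            printf("polling device %d\n", dev->id);
    }

    int main(void)
    {
            struct my_device dev = { .id = 7 };
            dev.poll_timer.function = poll_cb;        /* what timer_setup() records        */
            dev.poll_timer.function(&dev.poll_timer); /* what the timer core does on expiry */
            return 0;
    }
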
index ac97a459903454e0a88a36de7ab258f68d773c6f..0f5c99941a7d5b14e39663cad535a61698328e68 100644 (file)
@@ -179,10 +179,10 @@ static int power_button_countdown;
 /* Must hold the button down for at least this many counts to be processed */
 #define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */
 
-static void dsmg600_power_handler(unsigned long data);
+static void dsmg600_power_handler(struct timer_list *unused);
 static DEFINE_TIMER(dsmg600_power_timer, dsmg600_power_handler);
 
-static void dsmg600_power_handler(unsigned long data)
+static void dsmg600_power_handler(struct timer_list *unused)
 {
        /* This routine is called twice per second to check the
         * state of the power button.
index 43560208540819ffb3b512d562b79a592ba02d10..76dfff03cb714e575cee08e608a32e8d5143cca4 100644 (file)
@@ -202,10 +202,10 @@ static int power_button_countdown;
 /* Must hold the button down for at least this many counts to be processed */
 #define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */
 
-static void nas100d_power_handler(unsigned long data);
+static void nas100d_power_handler(struct timer_list *unused);
 static DEFINE_TIMER(nas100d_power_timer, nas100d_power_handler);
 
-static void nas100d_power_handler(unsigned long data)
+static void nas100d_power_handler(struct timer_list *unused)
 {
        /* This routine is called twice per second to check the
         * state of the power button.
index 3f5863de766acbdbe4189f4fe53a23f648710995..39eae10ac8defa76b2574cc907dc7451c5ccd38c 100644 (file)
@@ -172,7 +172,7 @@ static struct platform_device db88f5281_nand_flash = {
 static void __iomem *db88f5281_7seg;
 static struct timer_list db88f5281_timer;
 
-static void db88f5281_7seg_event(unsigned long data)
+static void db88f5281_7seg_event(struct timer_list *unused)
 {
        static int count = 0;
        writel(0, db88f5281_7seg + (count << 4));
@@ -189,7 +189,7 @@ static int __init db88f5281_7seg_init(void)
                        printk(KERN_ERR "Failed to ioremap db88f5281_7seg\n");
                        return -EIO;
                }
-               setup_timer(&db88f5281_timer, db88f5281_7seg_event, 0);
+               timer_setup(&db88f5281_timer, db88f5281_7seg_event, 0);
                mod_timer(&db88f5281_timer, jiffies + 2 * HZ);
        }
 
index b592f79a1742d7eab4f2315a41ebbdacf595bdda..fa8e7dd4d898aab839c67cfd5f4ecd90bc90e2ac 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/nand-gpio.h>
-
+#include <linux/gpio/machine.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
 
@@ -176,6 +176,17 @@ static inline void cmx255_init_nor(void) {}
 #endif
 
 #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE)
+
+static struct gpiod_lookup_table cmx255_nand_gpiod_table = {
+       .dev_id         = "gpio-nand",
+       .table          = {
+               GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH),
+       },
+};
+
 static struct resource cmx255_nand_resource[] = {
        [0] = {
                .start = PXA_CS1_PHYS,
@@ -198,11 +209,6 @@ static struct mtd_partition cmx255_nand_parts[] = {
 };
 
 static struct gpio_nand_platdata cmx255_nand_platdata = {
-       .gpio_nce = GPIO_NAND_CS,
-       .gpio_cle = GPIO_NAND_CLE,
-       .gpio_ale = GPIO_NAND_ALE,
-       .gpio_rdy = GPIO_NAND_RB,
-       .gpio_nwp = -1,
        .parts = cmx255_nand_parts,
        .num_parts = ARRAY_SIZE(cmx255_nand_parts),
        .chip_delay = 25,
@@ -220,6 +226,7 @@ static struct platform_device cmx255_nand = {
 
 static void __init cmx255_init_nand(void)
 {
+       gpiod_add_lookup_table(&cmx255_nand_gpiod_table);
        platform_device_register(&cmx255_nand);
 }
 #else
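
The cmx255 change moves the gpio-nand consumer from numeric GPIOs in platform data to GPIO descriptors: the board registers a gpiod_lookup_table keyed by the consumer device name ("gpio-nand") and connection IDs ("nce", "cle", "ale", "rdy"), and the driver can then request descriptors by ID instead of raw numbers. A hedged sketch of what the consumer side of such a lookup looks like (function and variable names are illustrative, not the actual gpio-nand driver):

    /* Illustrative consumer-side sketch; error handling trimmed. */
    #include <linux/gpio/consumer.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_nand_probe_gpios(struct device *dev)
    {
            struct gpio_desc *nce, *rdy;

            nce = devm_gpiod_get(dev, "nce", GPIOD_OUT_HIGH);  /* matched by con_id "nce" */
            if (IS_ERR(nce))
                    return PTR_ERR(nce);

            rdy = devm_gpiod_get(dev, "rdy", GPIOD_IN);        /* ready/busy line */
            if (IS_ERR(rdy))
                    return PTR_ERR(rdy);

            gpiod_set_value(nce, 0);                           /* drive chip-enable to logical 0 */
            return gpiod_get_value(rdy);
    }
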
index 6bea3d3a2dd76c4129a4b99c793a11651747e214..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644 (file)
@@ -1 +0,0 @@
-obj- += dummy.o
index 35ff45470dbfd5bac025eb149294307ab6ef2b7d..fc3b44028cfb22fb140ad75b621cf690c94e5a43 100644 (file)
@@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = {
                .val    = PMD_SECT_USER,
                .set    = "USR",
        }, {
-               .mask   = L_PMD_SECT_RDONLY,
-               .val    = L_PMD_SECT_RDONLY,
+               .mask   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+               .val    = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
                .set    = "ro",
                .clear  = "RW",
 #elif __LINUX_ARM_ARCH__ >= 6
index 81d4482b6861ca2f3ea71d56b1a967db953fc94a..a1f11a7ee81b2f81a512a59ca7cbc1b163189b47 100644 (file)
@@ -629,8 +629,8 @@ static struct section_perm ro_perms[] = {
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-               .mask   = ~L_PMD_SECT_RDONLY,
-               .prot   = L_PMD_SECT_RDONLY,
+               .mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+               .prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
 #else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
index c9530b5b5ca836cbe23216d664e3ea9939d3b126..149d05fb9421520bd659b62627941ed36ce46bb3 100644 (file)
@@ -345,7 +345,6 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #define pmd_thp_or_huge(pmd)   (pmd_huge(pmd) || pmd_trans_huge(pmd))
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 
 #define pmd_mkhuge(pmd)                (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
index 13f81f97139088cdc870b7dcbc6b990355e10865..2257dfcc44cce003b54d139e00aebe1743ed5e35 100644 (file)
@@ -4,6 +4,7 @@
 #
 
 source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
 
 menuconfig VIRTUALIZATION
        bool "Virtualization"
@@ -36,6 +37,8 @@ config KVM
        select HAVE_KVM_MSI
        select HAVE_KVM_IRQCHIP
        select HAVE_KVM_IRQ_ROUTING
+       select IRQ_BYPASS_MANAGER
+       select HAVE_KVM_IRQ_BYPASS
        ---help---
          Support hosting virtualized guest machines.
          We don't support KVM with 16K page tables yet, due to the multiple
index 861acbbac385626b0adcbc62ccd9107876e5ff6c..87c4f7ae24de238a354a97f9eff5216deb90c7ee 100644 (file)
@@ -27,6 +27,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v4.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
index 1e714329fe8a186f59f7a6bf25de7393a14fb403..8a211d95821f6d5b98992a65b820da20a7a60b21 100644 (file)
@@ -166,7 +166,7 @@ int check_nmi_wdt_touched(void)
        return 1;
 }
 
-static void nmi_wdt_timer(unsigned long data)
+static void nmi_wdt_timer(struct timer_list *unused)
 {
        if (check_nmi_wdt_touched())
                nmi_wdt_keepalive();
@@ -180,8 +180,7 @@ static int __init init_nmi_wdt(void)
        nmi_wdt_start();
        nmi_active = true;
 
-       init_timer(&ntimer);
-       ntimer.function = nmi_wdt_timer;
+       timer_setup(&ntimer, nmi_wdt_timer, 0);
        ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
        add_timer(&ntimer);
 
index 6b0be670ddfabc5c49c64320f8be1fe455ba9acf..6f6096ff05a462182797cc75587f70c7d6286a53 100644 (file)
@@ -12,7 +12,7 @@ cflags-y += -mno-dsbt -msdata=none -D__linux__
 
 cflags-$(CONFIG_C6X_BIG_KERNEL) += -mlong-calls
 
-CFLAGS_MODULE   += -mlong-calls -mno-dsbt -msdata=none
+KBUILD_CFLAGS_MODULE   += -mlong-calls -mno-dsbt -msdata=none
 
 CHECKFLAGS      +=
 
diff --git a/arch/frv/kernel/.gitignore b/arch/frv/kernel/.gitignore
new file mode 100644 (file)
index 0000000..c5f676c
--- /dev/null
@@ -0,0 +1 @@
+vmlinux.lds
index 48fe08230a8088c669822a5ebb05e705318644ae..2efaa18e995ad8d6694b6532bea265aeb3460658 100644 (file)
@@ -12,9 +12,9 @@ KBUILD_CFLAGS += -fno-short-enums
 
 # Modules must use either long-calls, or use pic/plt.
 # Use long-calls for now, it's easier.  And faster.
-# CFLAGS_MODULE += -fPIC
-# LDFLAGS_MODULE += -shared
-CFLAGS_MODULE += -mlong-calls
+# KBUILD_CFLAGS_MODULE += -fPIC
+# KBUILD_LDFLAGS_MODULE += -shared
+KBUILD_CFLAGS_MODULE += -mlong-calls
 
 cflags-y += $(call cc-option,-mv${CONFIG_HEXAGON_ARCH_VERSION})
 aflags-y += $(call cc-option,-mv${CONFIG_HEXAGON_ARCH_VERSION})
index ecd75e2e8eb391a6598db7bb092cc5701f1545d6..fa76493c17459639915c7df9410dcb8f3aaa8da2 100644 (file)
@@ -18,8 +18,6 @@
  * 02110-1301, USA.
  */
 
-#include <generated/compile.h>
-
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
@@ -180,7 +178,7 @@ static const struct user_regset hexagon_regsets[] = {
 };
 
 static const struct user_regset_view hexagon_user_view = {
-       .name = UTS_MACHINE,
+       .name = "hexagon",
        .e_machine = ELF_ARCH,
        .ei_osabi = ELF_OSABI,
        .regsets = hexagon_regsets,
index 3ad8f698836346793816a79ada16035f40b8197e..82f9bf702804f1e04653fb7b4bf227d3f48b6393 100644 (file)
                               cpu_all_mask :                           \
                               &node_to_cpu_mask[node])
 
-/*
- * Returns the number of the node containing Node 'nid'.
- * Not implemented here. Multi-level hierarchies detected with
- * the help of node_distance().
- */
-#define parent_node(nid) (nid)
-
 /*
  * Determines the node for a given pci bus
  */
index f7693f49c573693259e4182d468b3d99f5cbe03a..f4db2168d1b89e00723d4cd7b6950855ae18f6c6 100644 (file)
@@ -31,8 +31,8 @@ void foo(void)
        DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
        DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
 
-       BUILD_BUG_ON(sizeof(struct upid) != 32);
-       DEFINE(IA64_UPID_SHIFT, 5);
+       BUILD_BUG_ON(sizeof(struct upid) != 16);
+       DEFINE(IA64_UPID_SHIFT, 4);
 
        BLANK();
 
index a23f48181fd6a4c39fa672bc9c2757ffd18e384b..442bdeee6bd7920c9d6bb4e3edefc734951d072e 100644 (file)
@@ -65,7 +65,7 @@ void __init amiga_init_sound(void)
 #endif
 }
 
-static void nosound( unsigned long ignored );
+static void nosound(struct timer_list *unused);
 static DEFINE_TIMER(sound_timer, nosound);
 
 void amiga_mksound( unsigned int hz, unsigned int ticks )
@@ -107,7 +107,7 @@ void amiga_mksound( unsigned int hz, unsigned int ticks )
 }
 
 
-static void nosound( unsigned long ignored )
+static void nosound(struct timer_list *unused)
 {
        /* turn off DMA for audio channel 2 */
        custom.dmacon = DMAF_AUD2;
index d176686496410a5131047d02d345b40b8e4065b2..388780797f7d2290fc6a6a9247272243a7ab756d 100644 (file)
@@ -48,9 +48,9 @@ static unsigned long mac_bell_phasepersample;
  * some function protos
  */
 static void mac_init_asc( void );
-static void mac_nosound( unsigned long );
+static void mac_nosound(struct timer_list *);
 static void mac_quadra_start_bell( unsigned int, unsigned int, unsigned int );
-static void mac_quadra_ring_bell( unsigned long );
+static void mac_quadra_ring_bell(struct timer_list *);
 static void mac_av_start_bell( unsigned int, unsigned int, unsigned int );
 static void ( *mac_special_bell )( unsigned int, unsigned int, unsigned int );
 
@@ -216,7 +216,7 @@ void mac_mksound( unsigned int freq, unsigned int length )
 /*
  * regular ASC: stop whining ..
  */
-static void mac_nosound( unsigned long ignored )
+static void mac_nosound(struct timer_list *unused)
 {
        mac_asc_regs[ ASC_ENABLE ] = 0;
 }
@@ -270,7 +270,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
  * already load the wave table, or at least call this one...
  * This piece keeps reloading the wave table until done.
  */
-static void mac_quadra_ring_bell( unsigned long ignored )
+static void mac_quadra_ring_bell(struct timer_list *unused)
 {
        int     i, count = mac_asc_samplespersec / HZ;
        unsigned long flags;
index 99472d2ca3404d44e43d1565ff7bef7fdb30d442..97559fe0b95364f78e3eca81f951641bfbe09c26 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/atomic.h>
 #include <linux/mm_types.h>
+#include <linux/sched.h>
 
 #include <asm/bitops.h>
 #include <asm/mmu.h>
index 09ba7e894bad006471cf39a9924efaa41084c80f..d8787c9a499e4713a7e5092f540fe4d3ba49255b 100644 (file)
@@ -35,6 +35,3 @@ dtb-$(CONFIG_DT_NONE) += \
        bcm97435svmb.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index f5d01b31df50139eaff92f1e5e01f286a925d016..24a8efcd7b038760db523802c4fbca851a740cf4 100644 (file)
@@ -2,6 +2,3 @@
 dtb-$(CONFIG_CAVIUM_OCTEON_SOC)        += octeon_3xxx.dtb octeon_68xx.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 3eb2597a4d6c5b884c34360aa03bf6b71d5eec9c..441a3c16efb0d97842ad2326622a85563b5f7129 100644 (file)
@@ -3,6 +3,3 @@ dtb-$(CONFIG_FIT_IMAGE_FDT_BOSTON)      += boston.dtb
 
 dtb-$(CONFIG_MACH_PISTACHIO)   += pistachio_marduk.dtb
 obj-$(CONFIG_MACH_PISTACHIO)   += pistachio_marduk.dtb.o
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 035769269cbc412ba32b61a5c6488d4012a2851c..6a31759839b415d008b35e5159b2a81335de92c9 100644 (file)
@@ -3,6 +3,3 @@ dtb-$(CONFIG_JZ4740_QI_LB60)    += qi_lb60.dtb
 dtb-$(CONFIG_JZ4780_CI20)      += ci20.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 00e2e540ed3f2d230ea6615dffe12a8c3e5a5bd9..51ab9c1dff42a2553acc2ce559f5629bd41c4ed5 100644 (file)
@@ -2,6 +2,3 @@
 dtb-$(CONFIG_DT_EASY50712)     += easy50712.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 480af498a9ddba4fa68afe919722d531cb1e9e7e..3508720cb6d9e1da773e52c7ca21b2ac5cb57c30 100644 (file)
@@ -3,6 +3,3 @@ dtb-$(CONFIG_MIPS_MALTA)        += malta.dtb
 dtb-$(CONFIG_LEGACY_BOARD_SEAD3)       += sead3.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 2b99450d743344b4e2be402bba1ea6544e05a680..d630b27950f0605d58d9047cc7ab76090206dafe 100644 (file)
@@ -6,6 +6,3 @@ dtb-$(CONFIG_DT_XLP_GVP)        += xlp_gvp.dtb
 dtb-$(CONFIG_DT_XLP_RVP)       += xlp_rvp.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 6cd9c606f0255cb6678c282487cd02a85a9b68be..9e2c9faede4739853e0bb35442cb51cd2e4cca1a 100644 (file)
@@ -1,4 +1 @@
 dtb-$(CONFIG_FIT_IMAGE_FDT_NI169445)   += 169445.dtb
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                                   += dummy.o
index a139a0fbd7b794095d92ddf513875e61687d2f0a..ba9bcef8fde91dfaa03a75f61cda7c5edb92e464 100644 (file)
@@ -5,6 +5,3 @@ dtb-$(CONFIG_DTB_PIC32_NONE)            += \
                                        pic32mzda_sk.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 639adeac90af8cd68389ec168ba6b29000197c55..4451cf45b0ad04532dd108677a3b8329093c4756 100644 (file)
@@ -5,6 +5,3 @@ dtb-$(CONFIG_ATH79)                     += ar9331_dpt_module.dtb
 dtb-$(CONFIG_ATH79)                    += ar9331_dragino_ms14.dtb
 dtb-$(CONFIG_ATH79)                    += ar9331_omega.dtb
 dtb-$(CONFIG_ATH79)                    += ar9331_tl_mr3020.dtb
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 323c8bcfb602b9072d12c0cafceec8cf69da50ca..94bee5b38b53b3701072002e8d5c5939bbfb6dd9 100644 (file)
@@ -7,6 +7,3 @@ dtb-$(CONFIG_DTB_OMEGA2P)       += omega2p.dtb
 dtb-$(CONFIG_DTB_VOCORE2)      += vocore2.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 616322405ade7a1e2bc13fd7738534bceeb10d17..9987e0e378c50c6f19eb0457eae914f827f9688b 100644 (file)
@@ -2,6 +2,3 @@
 dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA)    += nexys4ddr.dtb
 
 obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj-                           += dummy.o
index 9e9e94415d08f13db779b0d63ea42692f0705c64..1a508a74d48d3f70595a2c5981b114f3e37d1061 100644 (file)
@@ -552,7 +552,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t pmd);
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return !!(pmd_val(pmd) & _PAGE_WRITE);
index d535edc01434117a8809fc21fb152226e0b46521..75fdeaa8c62f21a5420c963968c0188bbb459f49 100644 (file)
@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r = -EINTR;
-       sigset_t sigsaved;
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        local_irq_enable();
 
 out:
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
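
The kvm_arch_vcpu_ioctl_run() hunks here and in the PowerPC diff further down replace the open-coded sigprocmask() save/restore around the guest run loop with the common kvm_sigset_activate()/kvm_sigset_deactivate() helpers. The pattern they factor out is the classic temporary signal-mask swap; a user-space sketch:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
            sigset_t run_mask, saved;

            sigfillset(&run_mask);
            sigdelset(&run_mask, SIGINT);   /* e.g. only SIGINT may interrupt the "run" */

            sigprocmask(SIG_SETMASK, &run_mask, &saved);  /* activate: swap in vcpu mask  */
            puts("running with temporary signal mask");
            sigprocmask(SIG_SETMASK, &saved, NULL);       /* deactivate: restore old mask */
            return 0;
    }
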
index a8103f6972cd43a63163eec228faa5b917685f83..5d89e1ec5fcc3f31feb8181f1c635284036c6ea2 100644 (file)
@@ -156,7 +156,7 @@ static const struct file_operations pvc_scroll_proc_fops = {
        .write          = pvc_scroll_proc_write,
 };
 
-void pvc_proc_timerfunc(unsigned long data)
+void pvc_proc_timerfunc(struct timer_list *unused)
 {
        if (scroll_dir < 0)
                pvc_move(DISPLAY|RIGHT);
@@ -197,7 +197,7 @@ static int __init pvc_proc_init(void)
        if (proc_entry == NULL)
                goto error;
 
-       setup_timer(&timer, pvc_proc_timerfunc, 0UL);
+       timer_setup(&timer, pvc_proc_timerfunc, 0);
 
        return 0;
 error:
index 063de44675cefbd256dde16690f6070fac0725f7..ee0bd50f754bfb0f3d81f534d599d2aada55cfb3 100644 (file)
@@ -36,10 +36,10 @@ void mips_display_message(const char *str)
        }
 }
 
-static void scroll_display_message(unsigned long unused);
+static void scroll_display_message(struct timer_list *unused);
 static DEFINE_TIMER(mips_scroll_timer, scroll_display_message);
 
-static void scroll_display_message(unsigned long unused)
+static void scroll_display_message(struct timer_list *unused)
 {
        mips_display_message(&display_string[display_count++]);
        if (display_count == max_display_count)
index f23781d6bbb3fed5c763249a52afc3858d2a12fe..f0bfa1448744740f2b2e2d7da0e2206271f453a4 100644 (file)
@@ -60,7 +60,7 @@ void bust_spinlocks(int yes)
 void do_BUG(const char *file, int line)
 {
        bust_spinlocks(1);
-       printk(KERN_EMERG "------------[ cut here ]------------\n");
+       printk(KERN_EMERG CUT_HERE);
        printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
 }
 
index 27a2dd616a7d1732dc27dc332fcbd162375a7caf..c46bf29ae412f8007c8cd1e6a118a4730d4f1ff8 100644 (file)
@@ -91,7 +91,7 @@ static int pdc_console_setup(struct console *co, char *options)
 
 #define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
 
-static void pdc_console_poll(unsigned long unused);
+static void pdc_console_poll(struct timer_list *unused);
 static DEFINE_TIMER(pdc_console_timer, pdc_console_poll);
 static struct tty_port tty_port;
 
@@ -135,7 +135,7 @@ static const struct tty_operations pdc_console_tty_ops = {
        .chars_in_buffer = pdc_console_tty_chars_in_buffer,
 };
 
-static void pdc_console_poll(unsigned long unused)
+static void pdc_console_poll(struct timer_list *unused)
 {
        int data, count = 0;
 
index 9a677cd5997f9a891c2ecc8f6e3bbd08c5c41dbe..44697817ccc6ddc13406dc30388d06d7e8795335 100644 (file)
@@ -1005,7 +1005,6 @@ static inline int pmd_protnone(pmd_t pmd)
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-#define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 #define __pmd_write(pmd)       __pte_write(pmd_pte(pmd))
 #define pmd_savedwrite(pmd)    pte_savedwrite(pmd_pte(pmd))
index 7f74c282710f4c232c873119ebd2118c2307eb29..fad0e6ff460f22398b8487083cb34cb7abcb1de9 100644 (file)
 #include <linux/io.h>
 #include <asm/opal.h>
 
-/*
- * For static allocation of some of the structures.
- */
-#define IMC_MAX_PMUS                   32
-
 /*
  * Compatibility macros for IMC devices
  */
@@ -125,4 +120,5 @@ enum {
 extern int init_imc_pmu(struct device_node *parent,
                                struct imc_pmu *pmu_ptr, int pmu_id);
 extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
 #endif /* __ASM_POWERPC_IMC_PMU_H */
index 96753f3aac6dd7e753ba9b2f9ad5e4ebba7f50aa..941c2a3f231b90686481b6e711dbded50f72eaf6 100644 (file)
@@ -180,6 +180,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp);
 extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
 extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);
 
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce_64 *args);
index 602e0fde19b4a28ca505162dc07546bb2422b988..8bdc2f96c5d6a7a29b0fd299f2e121dc6a993911 100644 (file)
@@ -735,8 +735,8 @@ static __init void cpufeatures_cpu_quirks(void)
         */
        if ((version & 0xffffff00) == 0x004e0100)
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
-       else if ((version & 0xffffefff) == 0x004e0200)
-               cur_cpu_spec->cpu_features &= ~CPU_FTR_POWER9_DD2_1;
+       else if ((version & 0xffffefff) == 0x004e0201)
+               cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
 }
 
 static void __init cpufeatures_setup_finished(void)
index e3c5f75d137c51ec94e5d73e5e4d10f9d2944b99..8cdd852aedd1e86193b036b765b6c5b6d27ac643 100644 (file)
@@ -188,7 +188,7 @@ static void tau_timeout(void * info)
        local_irq_restore(flags);
 }
 
-static void tau_timeout_smp(unsigned long unused)
+static void tau_timeout_smp(struct timer_list *unused)
 {
 
        /* schedule ourselves to be run again */
@@ -230,7 +230,7 @@ int __init TAU_init(void)
 
 
        /* first, set up the window shrinking timer */
-       setup_timer(&tau_timer, tau_timeout_smp, 0UL);
+       timer_setup(&tau_timer, tau_timeout_smp, 0);
        tau_timer.expires = jiffies + shrink_timer;
        add_timer(&tau_timer);
 
index 235319c2574e07f03c3473d66e160e6e900204e2..966097232d2147bbcd79df41354a5f973fb9b7c1 100644 (file)
@@ -1238,8 +1238,9 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
        unsigned long vpte, rpte, guest_rpte;
        int ret;
        struct revmap_entry *rev;
-       unsigned long apsize, psize, avpn, pteg, hash;
+       unsigned long apsize, avpn, pteg, hash;
        unsigned long new_idx, new_pteg, replace_vpte;
+       int pshift;
 
        hptep = (__be64 *)(old->virt + (idx << 4));
 
@@ -1298,8 +1299,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                goto out;
 
        rpte = be64_to_cpu(hptep[1]);
-       psize = hpte_base_page_size(vpte, rpte);
-       avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
+       pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
+       avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
        pteg = idx / HPTES_PER_GROUP;
        if (vpte & HPTE_V_SECONDARY)
                pteg = ~pteg;
@@ -1311,20 +1312,20 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
                offset = (avpn & 0x1f) << 23;
                vsid = avpn >> 5;
                /* We can find more bits from the pteg value */
-               if (psize < (1ULL << 23))
-                       offset |= ((vsid ^ pteg) & old_hash_mask) * psize;
+               if (pshift < 23)
+                       offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
 
-               hash = vsid ^ (offset / psize);
+               hash = vsid ^ (offset >> pshift);
        } else {
                unsigned long offset, vsid;
 
                /* We only have 40 - 23 bits of seg_off in avpn */
                offset = (avpn & 0x1ffff) << 23;
                vsid = avpn >> 17;
-               if (psize < (1ULL << 23))
-                       offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;
+               if (pshift < 23)
+                       offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
 
-               hash = vsid ^ (vsid << 25) ^ (offset / psize);
+               hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
        }
 
        new_pteg = hash & new_hash_mask;
@@ -1801,6 +1802,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
        ssize_t nb;
        long int err, ret;
        int mmu_ready;
+       int pshift;
 
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
@@ -1855,6 +1857,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                        err = -EINVAL;
                        if (!(v & HPTE_V_VALID))
                                goto out;
+                       pshift = kvmppc_hpte_base_page_shift(v, r);
+                       if (pshift <= 0)
+                               goto out;
                        lbuf += 2;
                        nb += HPTE_SIZE;
 
@@ -1869,14 +1874,18 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                goto out;
                        }
                        if (!mmu_ready && is_vrma_hpte(v)) {
-                               unsigned long psize = hpte_base_page_size(v, r);
-                               unsigned long senc = slb_pgsize_encoding(psize);
-                               unsigned long lpcr;
+                               unsigned long senc, lpcr;
 
+                               senc = slb_pgsize_encoding(1ul << pshift);
                                kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
                                        (VRMA_VSID << SLB_VSID_SHIFT_1T);
-                               lpcr = senc << (LPCR_VRMASD_SH - 4);
-                               kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+                               if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+                                       lpcr = senc << (LPCR_VRMASD_SH - 4);
+                                       kvmppc_update_lpcr(kvm, lpcr,
+                                                          LPCR_VRMASD);
+                               } else {
+                                       kvmppc_setup_partition_table(kvm);
+                               }
                                mmu_ready = 1;
                        }
                        ++i;
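
The rehash code above stops carrying the base page size (psize) and carries its log2 (pshift) instead; since psize == 1UL << pshift, divisions and multiplications by the page size become shifts (offset / psize == offset >> pshift) and the size comparison "psize < (1ULL << 23)" becomes "pshift < 23". A two-line sanity check of that identity:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long offset = 0x12345678UL;
            int pshift = 16;                        /* 64K base pages */
            unsigned long psize = 1UL << pshift;

            assert(offset / psize == offset >> pshift);
            assert((offset & ~(psize - 1)) == ((offset >> pshift) << pshift));
            printf("ok\n");
            return 0;
    }
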
index 79ea3d9269dbf568904e504d78cc56850c77860d..2d46037ce93664199adee27806b8972d9130368d 100644 (file)
@@ -120,7 +120,6 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-static void kvmppc_setup_partition_table(struct kvm *kvm);
 
 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
                int *ip)
@@ -3574,7 +3573,7 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
        return;
 }
 
-static void kvmppc_setup_partition_table(struct kvm *kvm)
+void kvmppc_setup_partition_table(struct kvm *kvm)
 {
        unsigned long dw0, dw1;
 
index 071b87ee682f8ea85e37ae42cbb227714695c284..83b485810aea2fbfccc01718d1823f30b18e9398 100644 (file)
@@ -599,9 +599,9 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
        spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
 }
 
-void kvmppc_watchdog_func(unsigned long data)
+void kvmppc_watchdog_func(struct timer_list *t)
 {
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+       struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
        u32 tsr, new_tsr;
        int final;
 
@@ -1412,8 +1412,7 @@ int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
 {
        /* setup watchdog timer once */
        spin_lock_init(&vcpu->arch.wdt_lock);
-       setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
-                   (unsigned long)vcpu);
+       timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
 
        /*
         * Clear DBSR.MRR to avoid guest debug interrupt as
index 6b6c53c42ac9455f2a8c4f157f402da772163336..1915e86cef6f8fc2e05852ddc7a0867eca1c560b 100644 (file)
@@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int r;
-       sigset_t sigsaved;
 
        if (vcpu->mmio_needed) {
                vcpu->mmio_needed = 0;
@@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #endif
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (run->immediate_exit)
                r = -EINTR;
        else
                r = kvmppc_vcpu_run(run, vcpu);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
index c9de03e0c1f123de531d376d8b4d330d6f61bcdc..d469224c4ada8c23b923dc077b6249fa79ae0583 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <asm/code-patching.h>
+#include <asm/setup.h>
 
 static int __patch_instruction(unsigned int *addr, unsigned int instr)
 {
@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
         * During early early boot patch_instruction is called
         * when text_poke_area is not ready, but we still need
         * to allow patching. We just do the plain old patching
-        * We use slab_is_available and per cpu read * via this_cpu_read
-        * of text_poke_area. Per-CPU areas might not be up early
-        * this can create problems with just using this_cpu_read()
         */
-       if (!slab_is_available() || !this_cpu_read(text_poke_area))
+       if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
                return __patch_instruction(addr, instr);
 
        local_irq_save(flags);
index 564fff06f5c11ed32bd1ef97b1b9ad521e9cd9a3..23ec2c5e3b782412f8b10717cee352e56cc31217 100644 (file)
@@ -122,7 +122,8 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
        return !slice_area_is_free(mm, start, end - start);
 }
 
-static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+                               unsigned long high_limit)
 {
        unsigned long i;
 
@@ -133,15 +134,16 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;
 
-       if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
+       if (high_limit <= SLICE_LOW_TOP)
                return;
 
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
 }
 
-static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
+                               unsigned long high_limit)
 {
        unsigned char *hpsizes;
        int index, mask_index;
@@ -156,8 +158,11 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
                if (((lpsizes >> (i * 4)) & 0xf) == psize)
                        ret->low_slices |= 1u << i;
 
+       if (high_limit <= SLICE_LOW_TOP)
+               return;
+
        hpsizes = mm->context.high_slices_psize;
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,6 +174,10 @@ static int slice_check_fit(struct mm_struct *mm,
                           struct slice_mask mask, struct slice_mask available)
 {
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+       /*
+        * Make sure we just do bit compare only to the max
+        * addr limit and not the full bit map size.
+        */
        unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
 
        bitmap_and(result, mask.high_slices,
@@ -472,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
        /* First make up a "good" mask of slices that have the right size
         * already
         */
-       slice_mask_for_size(mm, psize, &good_mask);
+       slice_mask_for_size(mm, psize, &good_mask, high_limit);
        slice_print_mask(" good_mask", good_mask);
 
        /*
@@ -497,7 +506,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 #ifdef CONFIG_PPC_64K_PAGES
        /* If we support combo pages, we can allow 64k pages in 4k slices */
        if (psize == MMU_PAGE_64K) {
-               slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+               slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
                if (fixed)
                        slice_or_mask(&good_mask, &compat_mask);
        }
@@ -530,11 +539,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                        return newaddr;
                }
        }
-
-       /* We don't fit in the good mask, check what other slices are
+       /*
+        * We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
-       slice_mask_for_free(mm, &potential_mask);
+       slice_mask_for_free(mm, &potential_mask, high_limit);
        slice_or_mask(&potential_mask, &good_mask);
        slice_print_mask(" potential", potential_mask);
 
@@ -744,17 +753,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 {
        struct slice_mask mask, available;
        unsigned int psize = mm->context.user_psize;
+       unsigned long high_limit = mm->context.slb_addr_limit;
 
        if (radix_enabled())
                return 0;
 
        slice_range_to_mask(addr, len, &mask);
-       slice_mask_for_size(mm, psize, &available);
+       slice_mask_for_size(mm, psize, &available, high_limit);
 #ifdef CONFIG_PPC_64K_PAGES
        /* We need to account for 4k slices too */
        if (psize == MMU_PAGE_64K) {
                struct slice_mask compat_mask;
-               slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+               slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
                slice_or_mask(&available, &compat_mask);
        }
 #endif
index 264b6ab11978dc7a84cd0fbb58fb2245003c1b3d..b90a21bc2f3faeb2e14384c0df62b7c53a6acda7 100644 (file)
@@ -451,7 +451,7 @@ static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
  * This routine will alternate loading the virtual counters for
  * virtual CPUs
  */
-static void cell_virtual_cntr(unsigned long data)
+static void cell_virtual_cntr(struct timer_list *unused)
 {
        int i, prev_hdw_thread, next_hdw_thread;
        u32 cpu;
@@ -555,7 +555,7 @@ static void cell_virtual_cntr(unsigned long data)
 
 static void start_virt_cntrs(void)
 {
-       setup_timer(&timer_virt_cntr, cell_virtual_cntr, 0UL);
+       timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
        timer_virt_cntr.expires = jiffies + HZ / 10;
        add_timer(&timer_virt_cntr);
 }
@@ -587,7 +587,7 @@ static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
  * periodically based on kernel timer to switch which SPU is
  * being monitored in a round robbin fashion.
  */
-static void spu_evnt_swap(unsigned long data)
+static void spu_evnt_swap(struct timer_list *unused)
 {
        int node;
        int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
@@ -677,7 +677,7 @@ static void spu_evnt_swap(unsigned long data)
 
 static void start_spu_event_swap(void)
 {
-       setup_timer(&timer_spu_event_swap, spu_evnt_swap, 0UL);
+       timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0);
        timer_spu_event_swap.expires = jiffies + HZ / 25;
        add_timer(&timer_spu_event_swap);
 }
index 36344117c680b9e0500b345c18f98d11cb246134..0ead3cd73caa2f8816e8c04f47cca691efba0560 100644 (file)
@@ -26,7 +26,7 @@
  */
 static DEFINE_MUTEX(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
-static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
+static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
 struct imc_pmu_ref *nest_imc_refc;
 static int nest_pmus;
@@ -286,13 +286,14 @@ static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
 static void nest_change_cpu_context(int old_cpu, int new_cpu)
 {
        struct imc_pmu **pn = per_nest_pmu_arr;
-       int i;
 
        if (old_cpu < 0 || new_cpu < 0)
                return;
 
-       for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
+       while (*pn) {
                perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
+               pn++;
+       }
 }
 
 static int ppc_nest_imc_cpu_offline(unsigned int cpu)
@@ -467,7 +468,7 @@ static int nest_imc_event_init(struct perf_event *event)
         * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
         * Get the base memory addresss for this cpu.
         */
-       chip_id = topology_physical_package_id(event->cpu);
+       chip_id = cpu_to_chip_id(event->cpu);
        pcni = pmu->mem_info;
        do {
                if (pcni->id == chip_id) {
@@ -524,19 +525,19 @@ static int nest_imc_event_init(struct perf_event *event)
  */
 static int core_imc_mem_init(int cpu, int size)
 {
-       int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+       int nid, rc = 0, core_id = (cpu / threads_per_core);
        struct imc_mem_info *mem_info;
 
        /*
         * alloc_pages_node() will allocate memory for core in the
         * local node only.
         */
-       phys_id = topology_physical_package_id(cpu);
+       nid = cpu_to_node(cpu);
        mem_info = &core_imc_pmu->mem_info[core_id];
        mem_info->id = core_id;
 
        /* We need only vbase for core counters */
-       mem_info->vbase = page_address(alloc_pages_node(phys_id,
+       mem_info->vbase = page_address(alloc_pages_node(nid,
                                          GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
                                          __GFP_NOWARN, get_order(size)));
        if (!mem_info->vbase)
@@ -797,14 +798,14 @@ static int core_imc_event_init(struct perf_event *event)
 static int thread_imc_mem_alloc(int cpu_id, int size)
 {
        u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
-       int phys_id = topology_physical_package_id(cpu_id);
+       int nid = cpu_to_node(cpu_id);
 
        if (!local_mem) {
                /*
                 * This case could happen only once at start, since we dont
                 * free the memory in cpu offline path.
                 */
-               local_mem = page_address(alloc_pages_node(phys_id,
+               local_mem = page_address(alloc_pages_node(nid,
                                  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
                                  __GFP_NOWARN, get_order(size)));
                if (!local_mem)
@@ -1194,6 +1195,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
                kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
        kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
        kfree(pmu_ptr);
+       kfree(per_nest_pmu_arr);
        return;
 }
 
@@ -1218,6 +1220,13 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
                        return -ENOMEM;
 
                /* Needed for hotplug/migration */
+               if (!per_nest_pmu_arr) {
+                       per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
+                                               sizeof(struct imc_pmu *),
+                                               GFP_KERNEL);
+                       if (!per_nest_pmu_arr)
+                               return -ENOMEM;
+               }
                per_nest_pmu_arr[pmu_index] = pmu_ptr;
                break;
        case IMC_DOMAIN_CORE:
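
per_nest_pmu_arr changes from a fixed IMC_MAX_PMUS array to a pointer array allocated on first use with kcalloc(get_max_nest_dev() + 1, ...): the extra zeroed slot acts as a NULL terminator, so nest_change_cpu_context() can walk the array with a simple while (*pn) loop instead of bounding on IMC_MAX_PMUS. A sketch of the allocate-and-walk idiom (names are illustrative, not from the patch):

    #include <linux/slab.h>

    static struct imc_pmu **pmu_arr;        /* NULL-terminated thanks to the +1 slot */

    static int pmu_arr_alloc(int nr_pmus)
    {
            /* kcalloc zeroes the memory, so the final slot stays NULL */
            pmu_arr = kcalloc(nr_pmus + 1, sizeof(struct imc_pmu *), GFP_KERNEL);
            return pmu_arr ? 0 : -ENOMEM;
    }

    static void pmu_arr_for_each(void (*fn)(struct imc_pmu *))
    {
            struct imc_pmu **pn = pmu_arr;

            while (*pn)
                    fn(*pn++);
    }
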
index 1fbb5da17dd27ffa2ceb26e09614111e0955a3a9..9033c8194eda5d7d39db99af0af9e6f33811a272 100644 (file)
@@ -992,13 +992,13 @@ static void spu_calc_load(void)
        CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
 }
 
-static void spusched_wake(unsigned long data)
+static void spusched_wake(struct timer_list *unused)
 {
        mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
        wake_up_process(spusched_task);
 }
 
-static void spuloadavg_wake(unsigned long data)
+static void spuloadavg_wake(struct timer_list *unused)
 {
        mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
        spu_calc_load();
@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
                LOAD_INT(c), LOAD_FRAC(c),
                count_active_contexts(),
                atomic_read(&nr_spu_contexts),
-               task_active_pid_ns(current)->last_pid);
+               idr_get_cursor(&task_active_pid_ns(current)->idr));
        return 0;
 }
 
@@ -1124,8 +1124,8 @@ int __init spu_sched_init(void)
        }
        spin_lock_init(&spu_prio->runq_lock);
 
-       setup_timer(&spusched_timer, spusched_wake, 0);
-       setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
+       timer_setup(&spusched_timer, spusched_wake, 0);
+       timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);
 
        spusched_task = kthread_run(spusched_thread, NULL, "spusched");
        if (IS_ERR(spusched_task)) {
index 39a1d4225e0f7c114701e8550b073eb3aabd647d..3408f315ef48ed238a43f81c82e63a9bcb652e00 100644 (file)
@@ -361,9 +361,9 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void kw_i2c_timeout(unsigned long data)
+static void kw_i2c_timeout(struct timer_list *t)
 {
-       struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
+       struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer);
        unsigned long flags;
 
        spin_lock_irqsave(&host->lock, flags);
@@ -513,7 +513,7 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
        mutex_init(&host->mutex);
        init_completion(&host->complete);
        spin_lock_init(&host->lock);
-       setup_timer(&host->timeout_timer, kw_i2c_timeout, (unsigned long)host);
+       timer_setup(&host->timeout_timer, kw_i2c_timeout, 0);
 
        psteps = of_get_property(np, "AAPL,address-step", NULL);
        steps = psteps ? (*psteps) : 0x10;
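
Here the converted callback can no longer receive the host pointer through a data argument, so it recovers it with from_timer(), which is container_of() applied to the embedded struct timer_list. A sketch of the idiom with a hypothetical host structure:

    #include <linux/timer.h>
    #include <linux/spinlock.h>

    struct my_host {
            spinlock_t lock;
            struct timer_list timeout_timer;
    };

    static void my_timeout(struct timer_list *t)
    {
            /* map the timer pointer back to its enclosing structure */
            struct my_host *host = from_timer(host, t, timeout_timer);
            unsigned long flags;

            spin_lock_irqsave(&host->lock, flags);
            /* handle the timeout under the host lock */
            spin_unlock_irqrestore(&host->lock, flags);
    }

    static void my_host_init(struct my_host *host)
    {
            spin_lock_init(&host->lock);
            timer_setup(&host->timeout_timer, my_timeout, 0);
    }
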
index 21f6531fae20fc1e010f499923cde964540b895a..465ea105b7710ecf0ff7320dcec8fd2b9775cf2e 100644 (file)
@@ -153,6 +153,22 @@ static void disable_core_pmu_counters(void)
        put_online_cpus();
 }
 
+int get_max_nest_dev(void)
+{
+       struct device_node *node;
+       u32 pmu_units = 0, type;
+
+       for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
+               if (of_property_read_u32(node, "type", &type))
+                       continue;
+
+               if (type == IMC_TYPE_CHIP)
+                       pmu_units++;
+       }
+
+       return pmu_units;
+}
+
 static int opal_imc_counters_probe(struct platform_device *pdev)
 {
        struct device_node *imc_dev = pdev->dev.of_node;
@@ -191,8 +207,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
                        break;
                }
 
-               if (!imc_pmu_create(imc_dev, pmu_count, domain))
-                       pmu_count++;
+               if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
+                       if (domain == IMC_DOMAIN_NEST)
+                               pmu_count++;
+               }
        }
 
        return 0;
index c488621dbec30f66db91ed8713212181820d7e25..aebbe95c9230bc4b85016a945303307aa96c369e 100644 (file)
@@ -135,6 +135,7 @@ int chip_to_vas_id(int chipid)
        }
        return -1;
 }
+EXPORT_SYMBOL(chip_to_vas_id);
 
 static int vas_probe(struct platform_device *pdev)
 {
index 6b3f41985f28e17763b0f71973d56186ed28c7dc..de54cfc6109d833017b29b6e115549aad2601d53 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # s390/Makefile
 #
@@ -6,10 +7,6 @@
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1994 by Linus Torvalds
 #
 
index ef3fb1b9201f0331d333dc991af47c84597292a7..cb6e8066b1ad64b1a65ee441e3c1fb53e5599a8b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
  * Exports appldata_register_ops() and appldata_unregister_ops() for the
index 598df5708501734307565d3d7ba7c2e8b3472f6e..e68136c3c23aa467a9e10b362888023abe9559d4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects data related to memory management.
index 66037d2622b4075c708068a414aa4b346f8740c1..8bc14b0d1def0a6847437f7ade9a0abc4217b770 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects accumulated network statistics (Packets received/transmitted,
index 45b3178200abc184ef790458d7d9c44d717379df..433a994b1a89ef30861e86559d1849d8f6d01a16 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Data gathering module for Linux-VM Monitor Stream, Stage 1.
  * Collects misc. OS related data (CPU utilization, running processes).
index aed3069699bd5abf94ffbeb71a7db756b210b12f..bed227f267ae52aab3a02ec5e8a0f01767896a10 100644 (file)
@@ -1,11 +1,8 @@
 #!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
 #
 # arch/s390x/boot/install.sh
 #
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
 # Copyright (C) 1995 by Linus Torvalds
 #
 # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
index b48e20dd94e96a52f845782c2dd134b015794004..d60798737d8669ea447ae0905b98abf64e85aea6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
  *             Harald Freudenberger <freude@de.ibm.com>
  *
  * Derived from "crypto/aes_generic.c"
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #define KMSG_COMPONENT "aes_s390"
index 36aefc07d10cda9e9b28705d057952558d86e8d3..8720e9203ecfb07beac7c878aedfc925e2b0db1b 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * s390 arch random implementation.
  *
  * Copyright IBM Corp. 2017
  * Author(s): Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #include <linux/kernel.h>
index 992e630c227b58febc6e489f25e439979a2df4ca..436865926c26e00b0652c5330f45dd7365962703 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Crypto-API module for CRC-32 algorithms implemented with the
  * z/Architecture Vector Extension Facility.
index 0d296662bbf0aba03dbaa79de182114e315b6184..5346b5a80bb6c1bfd805b421e7fcf05e86157a12 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -6,12 +7,6 @@
  * Copyright IBM Corp. 2003, 2011
  * Author(s): Thomas Spatzier
  *           Jan Glauber (jan.glauber@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include <linux/init.h>
index 564616d48d8bd885ce31c232843f02bbed1d2b3f..3b7f96c9eead8f994e979603216fd0d190fc9e42 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
index a4e903ed7e21c0ccc7ceb66944ac6b43662003ea..003932db8d12d04bfc479d17d9a6677068059293 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
@@ -7,11 +8,6 @@
  *   Copyright IBM Corp. 2017
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *             Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #define KMSG_COMPONENT "paes_s390"
index 3e47c4a0f18b346f4ce9eb58ddb4928bcfe517a3..a97a1802cfb4d37d6e0ef5b9a9305aaf45febff7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2006, 2015
  * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
index 10f2007900790919f41ac6f3d3bd425c9decebb6..d6f8258b44df381943b8c570883ace6484363edb 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #ifndef _CRYPTO_ARCH_S390_SHA_H
 #define _CRYPTO_ARCH_S390_SHA_H
index 53c277999a2866b3aea6e9cc412ab6fc55aeb124..944aa6b237cd828fa17356b2eb961be87b57bbab 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -6,12 +7,6 @@
  * s390 Version:
  *   Copyright IBM Corp. 2005, 2011
  *   Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
index 2f4caa1ef123d1e6ce1bbf332ba045510e12ae07..b17eded532b121a76ddab6a8e9227a92fcbbfef6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
index c740f77285b2a6cf9d468b84a4c357cc8b6eba17..cf0718d121bcbb02f035f39474481947e447cf0e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Cryptographic API.
  *
@@ -5,12 +6,6 @@
  *
  * Copyright IBM Corp. 2007
  * Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <crypto/internal/hash.h>
index cf8a2d92467f363a6a2195f77a6d07dff2b5d3d0..43bbe63e2992c1ef525597bf4475b5f4c26d134b 100644 (file)
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  *    Hypervisor filesystem for Linux on s390.
  *
  *    Copyright IBM Corp. 2006, 2008
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
- *    License: GPL
  */
 
 #define KMSG_COMPONENT "hypfs"
index 792cda339af1ae3ad25ce8fc0457f4db961a4b8a..dd08db491b89e149fc7894a5f77578db8fe950d6 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * CPU-measurement facilities
  *
  *  Copyright IBM Corp. 2012
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  *            Jan Glauber <jang@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #ifndef _ASM_S390_CPU_MF_H
 #define _ASM_S390_CPU_MF_H
index 9a3cb3983c0140110f791c14ea0dbdb8cd84159d..1a61b1b997f2a0da08882411a8b701780fffba9a 100644 (file)
@@ -194,13 +194,14 @@ struct arch_elf_state {
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
-/*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
-#define ELF_ET_DYN_BASE                (is_compat_task() ? 0x000400000UL : \
-                                                   0x100000000UL)
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. 64-bit
+   tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
+                               (STACK_TOP / 3 * 2) : \
+                               (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
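
The new ELF_ET_DYN_BASE places 64-bit PIE/ET_DYN loads at two thirds of STACK_TOP rounded down to a 4 GB boundary, while compat tasks use the unrounded two-thirds point. A worked example with an illustrative STACK_TOP (not the real s390 value):

    /* assume, purely for illustration, STACK_TOP = 0x20000000000UL (2 TB) */
    #define EX_STACK_TOP    0x20000000000UL

    /* 2/3 of 2 TB is 0x15555555554; clearing the low 32 bits gives 0x15500000000 */
    static const unsigned long ex_et_dyn_base =
            (EX_STACK_TOP / 3 * 2) & ~((1UL << 32) - 1);
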
index 921391f2341eb8c4e886ee1bb4b67554f2d35d91..13de80cf741c09d94a4996a5ca18d883cbe0eba7 100644 (file)
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 #ifndef _ASM_S390_KPROBES_H
 #define _ASM_S390_KPROBES_H
 /*
  *  Kernel Probes (KProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  *
  * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
index f3a9b5a445b64382c1020099b6dbcec5d9f30ede..e14f381757f67b6c0111c78c491c2c1078a7f177 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for kernel virtual machines on s390
  *
  * Copyright IBM Corp. 2008, 2009
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
index 41393052ac57e1966ca735295be5f20f58ac3c55..74eeec9c0a809bffecbb26f3875ffc5be62e7e3b 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for paravirtual devices on s390
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
 /*
@@ -20,8 +17,6 @@
  *
  * Copyright IBM Corp. 2007,2008
  * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */
 #ifndef __S390_KVM_PARA_H
 #define __S390_KVM_PARA_H
index 6de5c6cb0061a337d251fbabfe6d8a1c3769682d..672f95b12d4065b4fa023444dbd5575ca71d767e 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * livepatch.h - s390-specific Kernel Live Patching Core
  *
@@ -7,13 +8,6 @@
  *           Jiri Slaby
  */
 
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
 #ifndef ASM_LIVEPATCH_H
 #define ASM_LIVEPATCH_H
 
index f4a07f788f78b3160f9ae312e699805b47f67ebe..65154eaa3714a4e9182cb87654e7b896e7be3e2f 100644 (file)
@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste ||
                test_thread_flag(TIF_PGSTE) ||
-               current->mm->context.alloc_pgste;
+               (current->mm && current->mm->context.alloc_pgste);
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
        mm->context.use_cmma = 0;
index d7fe9838084d3b2df31b26d16d6d3f4074d9a35d..57d7bc92e0b8a766d24520ea5234fca56971b646 100644 (file)
@@ -709,7 +709,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
 }
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
@@ -1264,6 +1264,12 @@ static inline pud_t pud_mkwrite(pud_t pud)
        return pud;
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+       return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
+}
+
 static inline pud_t pud_mkclean(pud_t pud)
 {
        if (pud_large(pud)) {
index 6bc941be6921773f566efd701a213ef44793a793..96f9a9151fde02fc6f76633d76d292f47512d364 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Access to user system call parameters and results
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef _ASM_SYSCALL_H
index a702cb9d4269240c462764878b50e02971f5d8ae..25057c118d563d46f9b45a637670c1d124a88509 100644 (file)
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for store system information stsi
  *
  * Copyright IBM Corp. 2001, 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Ulrich Weigand <weigand@de.ibm.com>
  *              Christian Borntraeger <borntraeger@de.ibm.com>
  */
index 1807229b292f005a4a0658ac221c3cb0481a39bf..cca406fdbe51fcf9985320c10988a02025b099bc 100644 (file)
@@ -53,6 +53,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
 static inline void topology_expect_change(void) { }
 
 #endif /* CONFIG_SCHED_TOPOLOGY */
index 9ad172dcd912d5763b0bf954617c9e398ad31aa8..38535a57fef8327c3b08bf20e1f8621fd93c776a 100644 (file)
@@ -6,10 +6,6 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
index 0dc86b3a7cb0d6340d9de5bee6032f6bfcc0c85c..b9ab584adf43d71232ce44414e51413423fbf673 100644 (file)
@@ -4,9 +4,5 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
index c36c97ffdc6fa24f246c41bfe993790410e4e032..84606b8cc49e47c794fb3b16ef5681de098bfc6f 100644 (file)
@@ -4,10 +4,6 @@
  *
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef __LINUX_KVM_PERF_S390_H
index 967aad39010515cd9614e51529517712630bbf4e..3a77833c74dc20e065e0bc3d77ce0bda9f955fa5 100644 (file)
@@ -4,10 +4,6 @@
  *
  * Copyright IBM Corp. 2013
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *  Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 #ifndef __KVM_VIRTIO_CCW_H
index 137ef473584ee5e2c6e42f70857cadd4e90df3b6..d568307321fcc54f51c1b3609dffc365cdc3a65d 100644 (file)
@@ -9,20 +9,6 @@
  *            Eric Rossman (edrossma@us.ibm.com)
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef __ASM_S390_ZCRYPT_H
index 58b9e127b61517c3f1cbe41a76757f7db156df76..80e974adb9e8be39d4ab558495eb69d110a2b3a3 100644 (file)
@@ -1392,7 +1392,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
        else
                except_str = "-";
        caller = (unsigned long) entry->caller;
-       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p  ",
+       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK  ",
                      area, sec, usec, level, except_str,
                      entry->id.fields.cpuid, (void *)caller);
        return rc;
index 3be829721cf948adc5349a342f04073a90dcf20e..b2c68fbf26346a3e9d6e626333a34c7c374bef31 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Disassemble s390 instructions.
  *
@@ -396,9 +397,14 @@ struct s390_insn *find_insn(unsigned char *code)
        unsigned char opfrag;
        int i;
 
+       /* Search the opcode offset table to find an entry which
+        * matches the beginning of the opcode. If there is no match
+        * the last entry will be used, which is the default entry for
+        * unknown instructions as well as 1-byte opcode instructions.
+        */
        for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
                entry = &opcode_offset[i];
-               if (entry->opcode == code[0] || entry->opcode == 0)
+               if (entry->opcode == code[0])
                        break;
        }
 
@@ -543,7 +549,7 @@ void show_code(struct pt_regs *regs)
                start += opsize;
                pr_cont("%s", buffer);
                ptr = buffer;
-               ptr += sprintf(ptr, "\n\t  ");
+               ptr += sprintf(ptr, "\n          ");
                hops++;
        }
        pr_cont("\n");
index 2aa545dca4d53c1c5b2bb7c6bdc18db10fd37930..5b23c4f6e50cd452177477105b914774d67898ad 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Stack dumping functions
  *
index a316cd6999ad9712defdf46db85e16eb429aebcb..9e5f6cd8e4c2e443a2c7fb46792157933447d170 100644 (file)
@@ -180,18 +180,17 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
  */
 ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
-       lgr     %r1,%r2
-       aghi    %r1,__TASK_thread               # thread_struct of prev task
-       lg      %r5,__TASK_stack(%r3)           # start of kernel stack of next
-       stg     %r15,__THREAD_ksp(%r1)          # store kernel stack of prev
-       lgr     %r1,%r3
-       aghi    %r1,__TASK_thread               # thread_struct of next task
+       lghi    %r4,__TASK_stack
+       lghi    %r1,__TASK_thread
+       lg      %r5,0(%r4,%r3)                  # start of kernel stack of next
+       stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
        lgr     %r15,%r5
        aghi    %r15,STACK_INIT                 # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
-       lg      %r15,__THREAD_ksp(%r1)          # load kernel stack of next
-       mvc     __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+       lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
+       aghi    %r3,__TASK_pid
+       mvc     __LC_CURRENT_PID(4,%r0),0(%r3)  # store pid of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
        bzr     %r14
index 310e59e6eb4b20bb7f17debb8ec412546289baf8..8ecb8726ac4762582a6ced188444ed14ea48cf46 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    ipl/reipl/dump support for Linux on s390.
  *
index 1a6521af17514a722c02e1b032fe3514d6857a15..af3722c28fd961283ff31c578d5329b4ee74fa60 100644 (file)
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Kernel Probes (KProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corp. 2002, 2006
  *
  * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
index bf9622f0e6b16aa291037d880f8ffe3ec75c1046..452502f9a0d986d4f8f97295143146cc26569c30 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux Guest Relocation (LGR) detection
  *
index 7b87991416fd6d882e7edf3f52b6f4af6fc45ad3..b7abfad4fd7df5583b19867dda1f81d22b81dcaa 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Kernel module help for s390.
  *
@@ -8,20 +9,6 @@
  *
  *  based on i386 version
  *    Copyright (C) 2001 Rusty Russell.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/module.h>
 #include <linux/elf.h>
index 6ff169253caeea0be88da521b84b796a72accb6e..c7a627620e5ebc4a7050167f15201abcad3eaf6e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   Machine check handler
  *
index 746d034233336f3804923a9340288d0f595f0211..cc085e2d2ce9907690fbe0912dd301ab44e8171d 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for s390x - CPU-measurement Counter Facility
  *
  *  Copyright IBM Corp. 2012, 2017
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "cpum_cf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 227b38bd82c94f211392348ec03dd146549d19c4..1c9ddd7aa5ec8fd32ee626d036a3c3ea6ed79362 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for the System z CPU-measurement Sampling Facility
  *
  * Copyright IBM Corp. 2013
  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "cpum_sf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 93a386f4a3b5a4533e9b0d52c1db2548a72e20ef..0d770e513abf404ff2cea26f7c01931b4d60cf4b 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance event support for s390x
  *
  *  Copyright IBM Corp. 2012, 2013
  *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #define KMSG_COMPONENT "perf"
 #define pr_fmt(fmt)    KMSG_COMPONENT ": " fmt
index 26c0523c14882d1b967ef8cb81a5cb6b387017a2..cd3df5514552cc262dee1d1b99260d0ee089668b 100644 (file)
@@ -1650,6 +1650,14 @@ static const struct user_regset s390_compat_regsets[] = {
                .get = s390_gs_cb_get,
                .set = s390_gs_cb_set,
        },
+       {
+               .core_note_type = NT_S390_GS_BC,
+               .n = sizeof(struct gs_cb) / sizeof(__u64),
+               .size = sizeof(__u64),
+               .align = sizeof(__u64),
+               .get = s390_gs_bc_get,
+               .set = s390_gs_bc_set,
+       },
        {
                .core_note_type = NT_S390_RI_CB,
                .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
index 090053cf279bb1d7077082dcc1a90ababb716478..793da97f9a6e53716e415bbc5e68cf844ce4cb1d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  S390 version
  *    Copyright IBM Corp. 1999, 2012
index cd4334e80b64cdb7908842f44015183be17c62f2..b8c1a85bcf2de75eccba0b5b86eb2870bd4b71b6 100644 (file)
@@ -55,6 +55,7 @@
 #include <asm/sigp.h>
 #include <asm/idle.h>
 #include <asm/nmi.h>
+#include <asm/topology.h>
 #include "entry.h"
 
 enum {
index e66687dc61446dc929c4450a7c887740dca11595..460dcfba7d4ec08db7de61942ea387ef38579a99 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Stack trace management functions
  *
index 12981e197f0125dcaea9f0bcb13f49207be00cec..80b862e9c53c6b108e611935ab8dd7c9b794ff13 100644 (file)
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * store hypervisor information instruction emulation functions.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Copyright IBM Corp. 2016
  * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
  */
index 5cbd52169348faf9c09bc258c2cd0072e0089ee0..cf561160ea887f9b6395e1d5ec30f9ee02aba77d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Time of day based timer functions.
  *
@@ -523,7 +524,7 @@ static void __init stp_reset(void)
        }
 }
 
-static void stp_timeout(unsigned long dummy)
+static void stp_timeout(struct timer_list *unused)
 {
        queue_work(time_sync_wq, &stp_work);
 }
@@ -532,7 +533,7 @@ static int __init stp_init(void)
 {
        if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
                return 0;
-       setup_timer(&stp_timer, stp_timeout, 0UL);
+       timer_setup(&stp_timer, stp_timeout, 0);
        time_init_wq();
        if (!stp_online)
                return 0;
index f9b393d4a078365ff8f83db8feaae839a20e0274..4d5b65e527b5495f17598ea20eb22bfb2c5ff754 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2011
  *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
index 39a218703c50add3defe9aa2ce5a85f88188e776..f3a1c7c6824ef0da8933fbb64864fdf5b97bd99a 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * vdso setup for s390
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #include <linux/init.h>
index eca3f001f081309c88372de459a6dfcccd754e7d..f61df5253c23c55ed26b357a645b16f4ab2e26a4 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_getres() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index a5769b83d90e687f08175af96ded9a4893ebd2d2..2d6ec3abe095ea30bac898e85fd4cf4faf06bbe5 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_gettime() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 63b86dceb0bfec0f72886d96230e5248620f8728..aa8bf13a2edb1f77c861ed6de0acc2a2836e26fa 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of gettimeofday() for 32 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index c8513deb8c663f51fa4f4bc2b2299ea24f1aaefd..faf5213b15dfae9536f2583b601c1b4610558249 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_getres() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index 5d7b56b49458d03ba885f105a9c07bcdecea019a..6046b3bfca4622ea87bd1759bfa36e3f2d5b6d89 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of clock_gettime() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index b02e62f3bc12d4ffb3850aff587d20ad944f4688..cc9dbc27da6fbcd22e865dd502be7f8bd515a11c 100644 (file)
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Userland implementation of gettimeofday() for 64 bits processes in a
  * s390 kernel for use in the vDSO
  *
  *  Copyright IBM Corp. 2008
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 #include <asm/vdso.h>
 #include <asm/asm-offsets.h>
index dd7178fbb4f3bd3f32955eeacf6e640f371e06a3..f24395a0191828ec7a638042632c69d4f4fcb939 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Virtual cpu timer based timer functions.
  *
index 98ad8b9e036093c8a784cfc0dfd3887e925c6357..9614aea5839b6ecf1c36e2ccbbd64e2592621dd9 100644 (file)
@@ -3372,7 +3372,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int rc;
-       sigset_t sigsaved;
 
        if (kvm_run->immediate_exit)
                return -EINTR;
@@ -3382,8 +3381,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return 0;
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
@@ -3417,8 +3415,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        disable_cpu_timer_accounting(vcpu);
        store_regs(vcpu, kvm_run);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        vcpu->stat.exit_userspace++;
        return rc;
index 2dbdcd85b68f200762846ce0c59dc39438d0f9e0..6cf024eb2085d86e6a729e3cfa94e80a50f02a38 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Collaborative memory management interface.
  *
@@ -56,10 +57,10 @@ static DEFINE_SPINLOCK(cmm_lock);
 
 static struct task_struct *cmm_thread_ptr;
 static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
-static DEFINE_TIMER(cmm_timer, NULL);
 
-static void cmm_timer_fn(unsigned long);
+static void cmm_timer_fn(struct timer_list *);
 static void cmm_set_timer(void);
+static DEFINE_TIMER(cmm_timer, cmm_timer_fn);
 
 static long cmm_alloc_pages(long nr, long *counter,
                            struct cmm_page_array **list)
@@ -194,13 +195,11 @@ static void cmm_set_timer(void)
                if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
                        return;
        }
-       cmm_timer.function = cmm_timer_fn;
-       cmm_timer.data = 0;
        cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
        add_timer(&cmm_timer);
 }
 
-static void cmm_timer_fn(unsigned long ignored)
+static void cmm_timer_fn(struct timer_list *unused)
 {
        long nr;
 
index b2c140193b0af72273ffcbafd78a4ee38417ca42..05d459b638f55d563d479eb978a5d60f0e421e1b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  KVM guest address space mapping code
  *
index 5bea139517a2edc21dc50074d2c2e9a94dabb19e..831bdcf407bbc1d2d76edc78e6a9f50aae1406cb 100644 (file)
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  flexible mmap layout support
  *
  * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
  * All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *
  * Started by Ingo Molnar <mingo@elte.hu>
  */
 
index ae677f814bc07a406f7f996a81ed2db65718f5ff..4f2b65d01a70418c802d6ce33713101ec0e2909b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2011
  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
index 0fe649c0d5423a2ed51fcff4dc7d011204fdf4e9..4902fed221c0effa59ff21fedabdda152641d112 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2012
  *
index c2f786f0ea0688c5fb9c36659ff66fb8140e6f2b..b482e95b6249e380dfb39d89253789c61dedb1e3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Copyright IBM Corp. 2012,2015
  *
index 0d300ee00f4e95b987884bbeb0fa1fce1bee7b59..f7aa5a77827ec17d893d59834a0d33082bb8fd82 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2012
  *
index 81b840bc6e4e733064d20f309bb57f24820f01cb..19bcb3b45a70fc12fa426d636fd4482c570c6654 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * s390 specific pci instructions
  *
index 280bbff121020ef49298705a1c86233f1ded1c6f..65300193b99f316c6d3f39c7db60f545272789c3 100644 (file)
@@ -15,6 +15,12 @@ ifneq ($(SUBARCH),$(ARCH))
   endif
 endif
 
+ifeq ($(ARCH),sh)
+KBUILD_DEFCONFIG       := shx3_defconfig
+else
+KBUILD_DEFCONFIG       := cayman_defconfig
+endif
+
 isa-y                                  := any
 isa-$(CONFIG_SH_DSP)                   := sh
 isa-$(CONFIG_CPU_SH2)                  := sh2
@@ -105,14 +111,12 @@ ifdef CONFIG_SUPERH32
 UTS_MACHINE            := sh
 BITS                   := 32
 LDFLAGS_vmlinux                += -e _stext
-KBUILD_DEFCONFIG       := shx3_defconfig
 else
 UTS_MACHINE            := sh64
 BITS                   := 64
 LDFLAGS_vmlinux                += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
                           --defsym phys_stext_shmedia=phys_stext+1 \
                           -e phys_stext_shmedia
-KBUILD_DEFCONFIG       := cayman_defconfig
 endif
 
 ifdef CONFIG_CPU_LITTLE_ENDIAN
index 2374a83d87b26f8b968f12d7895650d46d03b771..edff113f1b85f7361ff3848939dabd1a961ea2c4 100644 (file)
@@ -1 +1,6 @@
+ashiftrt.S
+ashldi3.c
+ashlsi3.S
+ashrsi3.S
+lshrsi3.S
 vmlinux.bin.*
index f2d9d3079d4e623fdcaf69730651d43da37b3fad..627ce8e75e016422102f0572c564bef3df2958b2 100644 (file)
@@ -104,6 +104,18 @@ static void error(char *x)
        while(1);       /* Halt */
 }
 
+unsigned long __stack_chk_guard;
+
+void __stack_chk_guard_setup(void)
+{
+       __stack_chk_guard = 0x000a0dff;
+}
+
+void __stack_chk_fail(void)
+{
+       error("stack-protector: Kernel stack is corrupted\n");
+}
+
 #ifdef CONFIG_SUPERH64
 #define stackalign     8
 #else
@@ -118,6 +130,8 @@ void decompress_kernel(void)
 {
        unsigned long output_addr;
 
+       __stack_chk_guard_setup();
+
 #ifdef CONFIG_SUPERH64
        output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
index c6d96049a0bb07a91e3515fdc5d3261489d60fbe..e8af2ff29bc3b06ca974a5e7a5139a4b2ba52a29 100644 (file)
@@ -59,9 +59,9 @@ static inline void heartbeat_toggle_bit(struct heartbeat_data *hd,
        }
 }
 
-static void heartbeat_timer(unsigned long data)
+static void heartbeat_timer(struct timer_list *t)
 {
-       struct heartbeat_data *hd = (struct heartbeat_data *)data;
+       struct heartbeat_data *hd = from_timer(hd, t, timer);
        static unsigned bit = 0, up = 1;
 
        heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED);
@@ -133,7 +133,7 @@ static int heartbeat_drv_probe(struct platform_device *pdev)
                }
        }
 
-       setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
+       timer_setup(&hd->timer, heartbeat_timer, 0);
        platform_set_drvdata(pdev, hd);
 
        return mod_timer(&hd->timer, jiffies + 1);
index cae707f3472dc59fbc79d9fe8ecfeeb75806e666..fe163ecd071970737faf086d47cffe8605d3c046 100644 (file)
@@ -85,18 +85,18 @@ int __init pci_is_66mhz_capable(struct pci_channel *hose,
        return cap66 > 0;
 }
 
-static void pcibios_enable_err(unsigned long __data)
+static void pcibios_enable_err(struct timer_list *t)
 {
-       struct pci_channel *hose = (struct pci_channel *)__data;
+       struct pci_channel *hose = from_timer(hose, t, err_timer);
 
        del_timer(&hose->err_timer);
        printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n");
        enable_irq(hose->err_irq);
 }
 
-static void pcibios_enable_serr(unsigned long __data)
+static void pcibios_enable_serr(struct timer_list *t)
 {
-       struct pci_channel *hose = (struct pci_channel *)__data;
+       struct pci_channel *hose = from_timer(hose, t, serr_timer);
 
        del_timer(&hose->serr_timer);
        printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n");
@@ -106,15 +106,11 @@ static void pcibios_enable_serr(unsigned long __data)
 void pcibios_enable_timers(struct pci_channel *hose)
 {
        if (hose->err_irq) {
-               init_timer(&hose->err_timer);
-               hose->err_timer.data = (unsigned long)hose;
-               hose->err_timer.function = pcibios_enable_err;
+               timer_setup(&hose->err_timer, pcibios_enable_err, 0);
        }
 
        if (hose->serr_irq) {
-               init_timer(&hose->serr_timer);
-               hose->serr_timer.data = (unsigned long)hose;
-               hose->serr_timer.function = pcibios_enable_serr;
+               timer_setup(&hose->serr_timer, pcibios_enable_serr, 0);
        }
 }
 
index 5bfb341cc5c4ad71eefe29a8f5175f715d07c59b..a171811602337e01c46230e546dd8855ba717eb3 100644 (file)
@@ -26,9 +26,9 @@ static ssize_t switch_show(struct device *dev,
 }
 static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
 
-static void switch_timer(unsigned long data)
+static void switch_timer(struct timer_list *t)
 {
-       struct push_switch *psw = (struct push_switch *)data;
+       struct push_switch *psw = from_timer(psw, t, debounce);
 
        schedule_work(&psw->work);
 }
@@ -78,10 +78,7 @@ static int switch_drv_probe(struct platform_device *pdev)
        }
 
        INIT_WORK(&psw->work, switch_work_handler);
-       init_timer(&psw->debounce);
-
-       psw->debounce.function = switch_timer;
-       psw->debounce.data = (unsigned long)psw;
+       timer_setup(&psw->debounce, switch_timer, 0);
 
        /* Workqueue API brain-damage */
        psw->pdev = pdev;
index 9a32eb4098dfcfeacc0eee666cb2562c32e687ed..1db470e024565b5b27e93a5e8327bf3fa9845381 100644 (file)
@@ -5,7 +5,6 @@
 #ifdef CONFIG_NUMA
 
 #define cpu_to_node(cpu)       ((void)(cpu),0)
-#define parent_node(node)      ((void)(node),0)
 
 #define cpumask_of_node(node)  ((void)node, cpu_online_mask)
 
index 675afa285ddb75d66d849f2d07188f330711d362..b4d0f570cc003bf470c8706a8144ed8373832358 100644 (file)
@@ -7,3 +7,4 @@ obj-y += mm/
 obj-y += math-emu/
 obj-y += net/
 obj-y += crypto/
+obj-$(CONFIG_SPARC64) += vdso/
index 987a57502909b4eb81bc86afa286f1140955ade0..6bf594ace663ec82f746132b4f62fc351bf5160c 100644 (file)
@@ -84,6 +84,8 @@ config SPARC64
        select HAVE_REGS_AND_STACK_ACCESS_API
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS
+       select GENERIC_TIME_VSYSCALL
+       select ARCH_CLOCKSOURCE_DATA
 
 config ARCH_DEFCONFIG
        string
index dbc448923f48f84b69757171be434c6f71a64355..edac927e4952347555fe6b60543d6ebf105088c9 100644 (file)
@@ -81,6 +81,10 @@ install:
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
 
+PHONY += vdso_install
+vdso_install:
+       $(Q)$(MAKE) $(build)=arch/sparc/vdso $@
+
 # This is the image used for packaging
 KBUILD_IMAGE := $(boot)/zImage
 
index a90eea24b2862fcc9fa297f37c494da3eda6ecc5..ca7ea5913494f9b66991c1659fcba70f04c525be 100644 (file)
@@ -23,10 +23,11 @@ void set_bit(unsigned long nr, volatile unsigned long *addr);
 void clear_bit(unsigned long nr, volatile unsigned long *addr);
 void change_bit(unsigned long nr, volatile unsigned long *addr);
 
+int fls(unsigned int word);
+int __fls(unsigned long word);
+
 #include <asm-generic/bitops/non-atomic.h>
 
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #ifdef __KERNEL__
diff --git a/arch/sparc/include/asm/clocksource.h b/arch/sparc/include/asm/clocksource.h
new file mode 100644 (file)
index 0000000..d63ef22
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_CLOCKSOURCE_H
+#define _ASM_SPARC_CLOCKSOURCE_H
+
+/* VDSO clocksources */
+#define VCLOCK_NONE   0  /* Nothing userspace can do. */
+#define VCLOCK_TICK   1  /* Use %tick.  */
+#define VCLOCK_STICK  2  /* Use %stick. */
+
+struct arch_clocksource_data {
+       int vclock_mode;
+};
+
+#endif /* _ASM_SPARC_CLOCKSOURCE_H */
index 3e3823db303e7d4016a9d5116d0a11e31844dfff..c73b5a3ab7b91f971d7ca9f3f09e7814fd113856 100644 (file)
@@ -63,6 +63,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
                        (unsigned long)_n_, sizeof(*(ptr)));            \
 })
 
+u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
+#define cmpxchg64(ptr, old, new)       __cmpxchg_u64(ptr, old, new)
+
 #include <asm-generic/cmpxchg-local.h>
 
 /*
index 5894389f5ed56ea9f88994d87f5d6a9c442195ba..25340df3570c78f1722ae328885927cb39c0dc90 100644 (file)
@@ -211,4 +211,18 @@ do {       if ((ex).e_ident[EI_CLASS] == ELFCLASS32)       \
                        (current->personality & (~PER_MASK)));  \
 } while (0)
 
+extern unsigned int vdso_enabled;
+
+#define        ARCH_DLINFO                                                     \
+do {                                                                   \
+       if (vdso_enabled)                                               \
+               NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
+                           (unsigned long)current->mm->context.vdso);  \
+} while (0)
+
+struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES        1
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+                                       int uses_interp);
 #endif /* !(__ASM_SPARC64_ELF_H) */
index 5fe64a57b4ba9c9e8964e4ff97b3c2b41f721d14..ad4fb93508ba14a8e61b24369c9afa9feaf67b98 100644 (file)
@@ -97,6 +97,7 @@ typedef struct {
        unsigned long           thp_pte_count;
        struct tsb_config       tsb_block[MM_NUM_TSBS];
        struct hv_tsb_descr     tsb_descr[MM_NUM_TSBS];
+       void                    *vdso;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
index e25d25b0a34b52c28a1bf122d7dee3a350b8c35c..b361702ef52a7529aee7849fdf310d28ef338a76 100644 (file)
@@ -8,9 +8,11 @@
 
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
+#include <linux/smp.h>
 
 #include <asm/spitfire.h>
 #include <asm-generic/mm_hooks.h>
+#include <asm/percpu.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
index 5a9e96be16652bc13bb4e6cd0f298b0e613d5883..9937c5ff94a9fe9eaeb8744ee2786ad7a5a2afa2 100644 (file)
@@ -715,7 +715,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return pte_pfn(pte);
 }
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline unsigned long pmd_write(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
index c7c79fe8d2655bd3e54177e84ff62395afd2cac6..aac23d4a4ddd5647643dfdb3e337492802f57a8c 100644 (file)
@@ -200,6 +200,13 @@ unsigned long get_wchan(struct task_struct *task);
  * To make a long story short, we are trying to yield the current cpu
  * strand during busy loops.
  */
+#ifdef BUILD_VDSO
+#define        cpu_relax()     asm volatile("\n99:\n\t"                        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    ::: "memory")
+#else /* ! BUILD_VDSO */
 #define cpu_relax()    asm volatile("\n99:\n\t"                        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     "rd        %%ccr, %%g0\n\t"        \
@@ -211,6 +218,7 @@ unsigned long get_wchan(struct task_struct *task);
                                     "nop\n\t"                          \
                                     ".previous"                        \
                                     ::: "memory")
+#endif
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
index 3831b1911a19a3afa7899428eb5c80da4c03cccb..34c628a22ea5b5ba5a186536b7a23df9eaf1785e 100644 (file)
@@ -11,8 +11,6 @@ static inline int cpu_to_node(int cpu)
        return numa_cpu_lookup_table[cpu];
 }
 
-#define parent_node(node)      (node)
-
 #define cpumask_of_node(node) ((node) == -1 ?                          \
                               cpu_all_mask :                           \
                               &numa_cpumask_lookup_table[node])
index 25b6abdb39083c6286d1c59984c8ff3179ba4852..522a677e050d757d9d19afb663b3adeda02245a1 100644 (file)
@@ -217,7 +217,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        sllx            REG2, 32, REG2;                 \
        andcc           REG1, REG2, %g0;                \
        be,pt           %xcc, 700f;                     \
-        sethi          %hi(0x1ffc0000), REG2;          \
+        sethi          %hi(0xffe00000), REG2;          \
        sllx            REG2, 1, REG2;                  \
        brgez,pn        REG1, FAIL_LABEL;               \
         andn           REG1, REG2, REG1;               \
diff --git a/arch/sparc/include/asm/vdso.h b/arch/sparc/include/asm/vdso.h
new file mode 100644 (file)
index 0000000..93b6287
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_VDSO_H
+#define _ASM_SPARC_VDSO_H
+
+struct vdso_image {
+       void *data;
+       unsigned long size;   /* Always a multiple of PAGE_SIZE */
+       long sym_vvar_start;  /* Negative offset to the vvar area */
+       long sym_vread_tick; /* Start of vread_tick section */
+       long sym_vread_tick_patch_start; /* Start of tick read */
+       long sym_vread_tick_patch_end;   /* End of tick read */
+};
+
+#ifdef CONFIG_SPARC64
+extern const struct vdso_image vdso_image_64_builtin;
+#endif
+#ifdef CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_builtin;
+#endif
+
+#endif /* _ASM_SPARC_VDSO_H */
diff --git a/arch/sparc/include/asm/vvar.h b/arch/sparc/include/asm/vvar.h
new file mode 100644 (file)
index 0000000..0289503
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _ASM_SPARC_VVAR_DATA_H
+#define _ASM_SPARC_VVAR_DATA_H
+
+#include <asm/clocksource.h>
+#include <linux/seqlock.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+struct vvar_data {
+       unsigned int seq;
+
+       int vclock_mode;
+       struct { /* extract of a clocksource struct */
+               u64     cycle_last;
+               u64     mask;
+               int     mult;
+               int     shift;
+       } clock;
+       /* open coded 'struct timespec' */
+       u64             wall_time_sec;
+       u64             wall_time_snsec;
+       u64             monotonic_time_snsec;
+       u64             monotonic_time_sec;
+       u64             monotonic_time_coarse_sec;
+       u64             monotonic_time_coarse_nsec;
+       u64             wall_time_coarse_sec;
+       u64             wall_time_coarse_nsec;
+
+       int             tz_minuteswest;
+       int             tz_dsttime;
+};
+
+extern struct vvar_data *vvar_data;
+extern int vdso_fix_stick;
+
+static inline unsigned int vvar_read_begin(const struct vvar_data *s)
+{
+       unsigned int ret;
+
+repeat:
+       ret = READ_ONCE(s->seq);
+       if (unlikely(ret & 1)) {
+               cpu_relax();
+               goto repeat;
+       }
+       smp_rmb(); /* Finish all reads before we return seq */
+       return ret;
+}
+
+static inline int vvar_read_retry(const struct vvar_data *s,
+                                       unsigned int start)
+{
+       smp_rmb(); /* Finish all reads before checking the value of seq */
+       return unlikely(s->seq != start);
+}
+
+static inline void vvar_write_begin(struct vvar_data *s)
+{
+       ++s->seq;
+       smp_wmb(); /* Makes sure that increment of seq is reflected */
+}
+
+static inline void vvar_write_end(struct vvar_data *s)
+{
+       smp_wmb(); /* Makes the value of seq current before we increment */
+       ++s->seq;
+}
+
+
+#endif /* _ASM_SPARC_VVAR_DATA_H */
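vvar_read_begin()/vvar_read_retry() and the write-side pair form an open-coded seqcount around vvar_data: writers make seq odd while updating, readers spin past odd values and retry if seq changed underneath them. A minimal reader sketch using only the names declared above:

/* Sketch: a lock-free reader built on the helpers in this header. */
#include <asm/vvar.h>

static u64 read_wall_sec(const struct vvar_data *vvar)
{
	unsigned int seq;
	u64 sec;

	do {
		seq = vvar_read_begin(vvar);	/* waits out an in-progress (odd) seq */
		sec = vvar->wall_time_sec;
	} while (vvar_read_retry(vvar, seq));	/* a writer raced us: reread */

	return sec;
}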
index ad6f360261f609f23250b8f5a8ca0b66f68eb2b3..5f80a70cc901ab02e5756ddad31eee4791538616 100644 (file)
@@ -1,4 +1,8 @@
 #ifndef __ASMSPARC_AUXVEC_H
 #define __ASMSPARC_AUXVEC_H
 
+#define AT_SYSINFO_EHDR                33
+
+#define AT_VECTOR_SIZE_ARCH    1
+
 #endif /* !(__ASMSPARC_AUXVEC_H) */
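AT_SYSINFO_EHDR is the auxiliary-vector tag through which the ELF loader passes the vDSO base address to each new process; AT_VECTOR_SIZE_ARCH reserves room for it in the auxv array. A userspace sketch, not part of the patch, assuming glibc's getauxval() is available:

/* Userspace sketch: locating the vDSO ELF header via the aux vector. */
#include <stdio.h>
#include <elf.h>		/* AT_SYSINFO_EHDR */
#include <sys/auxv.h>		/* getauxval() (glibc) */

int main(void)
{
	unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);

	if (vdso_base)
		printf("vDSO mapped at %#lx\n", vdso_base);
	else
		puts("no vDSO advertised");
	return 0;
}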
index 8de9617589a52e197a6e4ead214910c3e8c733a9..cc97545737f0554e05ff16cfbd1a8460e5781a75 100644 (file)
@@ -43,6 +43,7 @@ obj-$(CONFIG_SPARC32)   += systbls_32.o
 obj-y                   += time_$(BITS).o
 obj-$(CONFIG_SPARC32)   += windows.o
 obj-y                   += cpu.o
+obj-$(CONFIG_SPARC64)  += vdso.o
 obj-$(CONFIG_SPARC32)   += devices.o
 obj-y                   += ptrace_$(BITS).o
 obj-y                   += unaligned_$(BITS).o
index 9e293de120520b55822e63590c2b63ac0d99136f..a41e6e16eb367d46d16d24f5876f58c151a45db1 100644 (file)
@@ -641,6 +641,8 @@ niagara4_patch:
         nop
        call    niagara4_patch_pageops
         nop
+       call    niagara4_patch_fls
+        nop
 
        ba,a,pt %xcc, 80f
         nop
index 1ef6156b15305d40dc8382b4fabb6af974d8a01b..418592a09b411f6cac770ef8f16d807d81d3c3b6 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/miscdevice.h>
 #include <linux/bootmem.h>
 #include <linux/export.h>
+#include <linux/refcount.h>
 
 #include <asm/cpudata.h>
 #include <asm/hypervisor.h>
@@ -71,7 +72,7 @@ struct mdesc_handle {
        struct list_head        list;
        struct mdesc_mem_ops    *mops;
        void                    *self_base;
-       atomic_t                refcnt;
+       refcount_t              refcnt;
        unsigned int            handle_size;
        struct mdesc_hdr        mdesc;
 };
@@ -153,7 +154,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
        memset(hp, 0, handle_size);
        INIT_LIST_HEAD(&hp->list);
        hp->self_base = base;
-       atomic_set(&hp->refcnt, 1);
+       refcount_set(&hp->refcnt, 1);
        hp->handle_size = handle_size;
 }
 
@@ -183,7 +184,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp)
        unsigned int alloc_size;
        unsigned long start;
 
-       BUG_ON(atomic_read(&hp->refcnt) != 0);
+       BUG_ON(refcount_read(&hp->refcnt) != 0);
        BUG_ON(!list_empty(&hp->list));
 
        alloc_size = PAGE_ALIGN(hp->handle_size);
@@ -221,7 +222,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
 
 static void mdesc_kfree(struct mdesc_handle *hp)
 {
-       BUG_ON(atomic_read(&hp->refcnt) != 0);
+       BUG_ON(refcount_read(&hp->refcnt) != 0);
        BUG_ON(!list_empty(&hp->list));
 
        kfree(hp->self_base);
@@ -260,7 +261,7 @@ struct mdesc_handle *mdesc_grab(void)
        spin_lock_irqsave(&mdesc_lock, flags);
        hp = cur_mdesc;
        if (hp)
-               atomic_inc(&hp->refcnt);
+               refcount_inc(&hp->refcnt);
        spin_unlock_irqrestore(&mdesc_lock, flags);
 
        return hp;
@@ -272,7 +273,7 @@ void mdesc_release(struct mdesc_handle *hp)
        unsigned long flags;
 
        spin_lock_irqsave(&mdesc_lock, flags);
-       if (atomic_dec_and_test(&hp->refcnt)) {
+       if (refcount_dec_and_test(&hp->refcnt)) {
                list_del_init(&hp->list);
                hp->mops->free(hp);
        }
@@ -514,7 +515,7 @@ void mdesc_update(void)
        if (status != HV_EOK || real_len > len) {
                printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
                       status);
-               atomic_dec(&hp->refcnt);
+               refcount_dec(&hp->refcnt);
                mdesc_free(hp);
                goto out;
        }
@@ -527,7 +528,7 @@ void mdesc_update(void)
        mdesc_notify_clients(orig_hp, hp);
 
        spin_lock_irqsave(&mdesc_lock, flags);
-       if (atomic_dec_and_test(&orig_hp->refcnt))
+       if (refcount_dec_and_test(&orig_hp->refcnt))
                mdesc_free(orig_hp);
        else
                list_add(&orig_hp->list, &mdesc_zombie_list);
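The mdesc_handle change is a standard atomic_t-to-refcount_t conversion: the inc/dec/dec_and_test shape stays the same, but refcount_t saturates and warns on overflow and underflow instead of silently wrapping. A minimal sketch of the pattern with illustrative names:

/* Sketch of the conversion pattern (struct and function names are illustrative). */
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcnt;
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->refcnt, 1);		/* was atomic_set(&o->refcnt, 1) */
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->refcnt);		/* was atomic_inc() */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcnt))	/* was atomic_dec_and_test() */
		kfree(o);
}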
index 3b397081047af69a23b6672917e1706786d3134f..2ef8cfa9677ed5b034640be15933e0639026ad49 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/jiffies.h>
 #include <linux/cpufreq.h>
 #include <linux/percpu.h>
-#include <linux/miscdevice.h>
 #include <linux/rtc/m48t59.h>
 #include <linux/kernel_stat.h>
 #include <linux/clockchips.h>
@@ -54,6 +53,8 @@
 
 DEFINE_SPINLOCK(rtc_lock);
 
+unsigned int __read_mostly vdso_fix_stick;
+
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -831,12 +832,17 @@ static void init_tick_ops(struct sparc64_tick_ops *ops)
 void __init time_init_early(void)
 {
        if (tlb_type == spitfire) {
-               if (is_hummingbird())
+               if (is_hummingbird()) {
                        init_tick_ops(&hbtick_operations);
-               else
+                       clocksource_tick.archdata.vclock_mode = VCLOCK_NONE;
+               } else {
                        init_tick_ops(&tick_operations);
+                       clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
+                       vdso_fix_stick = 1;
+               }
        } else {
                init_tick_ops(&stick_operations);
+               clocksource_tick.archdata.vclock_mode = VCLOCK_STICK;
        }
 }
 
diff --git a/arch/sparc/kernel/vdso.c b/arch/sparc/kernel/vdso.c
new file mode 100644 (file)
index 0000000..5888066
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ *  Copyright 2003 Andi Kleen, SuSE Labs.
+ *
+ *  Thanks to hpa@transmeta.com for some useful hint.
+ *  Special thanks to Ingo Molnar for his early experience with
+ *  a different vsyscall implementation for Linux/IA32 and for the name.
+ */
+
+#include <linux/seqlock.h>
+#include <linux/time.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/vvar.h>
+
+void update_vsyscall_tz(void)
+{
+       if (unlikely(vvar_data == NULL))
+               return;
+
+       vvar_data->tz_minuteswest = sys_tz.tz_minuteswest;
+       vvar_data->tz_dsttime = sys_tz.tz_dsttime;
+}
+
+void update_vsyscall(struct timekeeper *tk)
+{
+       struct vvar_data *vdata = vvar_data;
+
+       if (unlikely(vdata == NULL))
+               return;
+
+       vvar_write_begin(vdata);
+       vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+       vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
+       vdata->clock.mask = tk->tkr_mono.mask;
+       vdata->clock.mult = tk->tkr_mono.mult;
+       vdata->clock.shift = tk->tkr_mono.shift;
+
+       vdata->wall_time_sec = tk->xtime_sec;
+       vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
+
+       vdata->monotonic_time_sec = tk->xtime_sec +
+                                   tk->wall_to_monotonic.tv_sec;
+       vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec +
+                                     (tk->wall_to_monotonic.tv_nsec <<
+                                      tk->tkr_mono.shift);
+
+       while (vdata->monotonic_time_snsec >=
+              (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+               vdata->monotonic_time_snsec -=
+                               ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+               vdata->monotonic_time_sec++;
+       }
+
+       vdata->wall_time_coarse_sec = tk->xtime_sec;
+       vdata->wall_time_coarse_nsec =
+                       (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
+
+       vdata->monotonic_time_coarse_sec =
+               vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+       vdata->monotonic_time_coarse_nsec =
+               vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+
+       while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+               vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+               vdata->monotonic_time_coarse_sec++;
+       }
+
+       vvar_write_end(vdata);
+}
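update_vsyscall() publishes the timekeeper's cycle_last/mask/mult/shift together with shifted-nanosecond ("snsec") fields, so the vDSO can extend the last kernel timestamp from a raw counter read. A sketch of how a reader consumes these fields, mirroring do_realtime() in vclock_gettime.c later in this patch:

/* Sketch: turning a counter read into the sub-second part of CLOCK_REALTIME.
 * The result may exceed NSEC_PER_SEC; callers fold the overflow into tv_sec
 * (the vDSO uses timespec_add_ns() for that).
 */
static u64 realtime_ns(const struct vvar_data *vvar, u64 cycles)
{
	u64 ns = vvar->wall_time_snsec;				/* nanoseconds << shift */

	ns += ((cycles - vvar->clock.cycle_last) & vvar->clock.mask) *
	      vvar->clock.mult;					/* delta, also << shift */
	return ns >> vvar->clock.shift;				/* plain nanoseconds */
}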
index c858f5f3ce2c13351ac468563b4b0595ddbd9f44..635d67ffc9a39f72f3acd24f94b052e754fcfcb1 100644 (file)
@@ -798,9 +798,9 @@ void vio_port_up(struct vio_driver_state *vio)
 }
 EXPORT_SYMBOL(vio_port_up);
 
-static void vio_port_timer(unsigned long _arg)
+static void vio_port_timer(struct timer_list *t)
 {
-       struct vio_driver_state *vio = (struct vio_driver_state *) _arg;
+       struct vio_driver_state *vio = from_timer(vio, t, timer);
 
        vio_port_up(vio);
 }
@@ -849,7 +849,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
 
        vio->ops = ops;
 
-       setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);
+       timer_setup(&vio->timer, vio_port_timer, 0);
 
        return 0;
 }
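This hunk is part of the 4.15-wide timer API conversion: callbacks now receive the struct timer_list pointer and recover their container with from_timer(), and timer_setup() replaces setup_timer() with its unsigned long cookie. A minimal sketch of the pattern with illustrative names:

/* Sketch of the timer conversion pattern (names are illustrative). */
#include <linux/timer.h>

struct my_port {
	struct timer_list timer;
};

static void my_port_timeout(struct timer_list *t)
{
	struct my_port *p = from_timer(p, t, timer);	/* container_of() helper */

	/* ... act on p ... */
}

static void my_port_init(struct my_port *p)
{
	timer_setup(&p->timer, my_port_timeout, 0);	/* was setup_timer(..., (unsigned long)p) */
}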
index 44829a8dc45818e819a5f08aa67846599f5b41b4..063556fe2cb1d8877c0a6028621658e6dd041d07 100644 (file)
@@ -17,6 +17,9 @@ lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 lib-$(CONFIG_SPARC64) += multi3.o
+lib-$(CONFIG_SPARC64) += fls.o
+lib-$(CONFIG_SPARC64) += fls64.o
+lib-$(CONFIG_SPARC64) += NG4fls.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/NG4fls.S b/arch/sparc/lib/NG4fls.S
new file mode 100644 (file)
index 0000000..2d0991e
--- /dev/null
@@ -0,0 +1,30 @@
+/* NG4fls.S: SPARC optimized fls and __fls for T4 and above.
+ *
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/linkage.h>
+
+#define LZCNT_O0_G2    \
+       .word   0x85b002e8
+
+       .text
+       .register       %g2, #scratch
+       .register       %g3, #scratch
+
+ENTRY(NG4fls)
+       LZCNT_O0_G2     !lzcnt  %o0, %g2
+       mov     64, %g3
+       retl
+        sub    %g3, %g2, %o0
+ENDPROC(NG4fls)
+
+ENTRY(__NG4fls)
+       brz,pn  %o0, 1f
+       LZCNT_O0_G2     !lzcnt  %o0, %g2
+       mov     63, %g3
+       sub     %g3, %g2, %o0
+1:
+       retl
+        nop
+ENDPROC(__NG4fls)
index aa58ab39f9a6293ab496f61d21ba40daed5e7df6..37866175c921e2bfdca67d990a2a18cf5a24f08b 100644 (file)
@@ -4,6 +4,8 @@
  * Copyright (C) 2012 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/linkage.h>
+
 #define BRANCH_ALWAYS  0x10680000
 #define NOP            0x01000000
 #define NG_DO_PATCH(OLD, NEW)  \
@@ -53,3 +55,10 @@ niagara4_patch_pageops:
        retl
         nop
        .size   niagara4_patch_pageops,.-niagara4_patch_pageops
+
+ENTRY(niagara4_patch_fls)
+       NG_DO_PATCH(fls, NG4fls)
+       NG_DO_PATCH(__fls, __NG4fls)
+       retl
+        nop
+ENDPROC(niagara4_patch_fls)
index 5010df4973879fb1b17e689e01ae11ca754ffc56..465a901a0ada71aef7ac08e36c90b7f528ae809d 100644 (file)
@@ -173,6 +173,20 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 }
 EXPORT_SYMBOL(__cmpxchg_u32);
 
+u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
+{
+       unsigned long flags;
+       u64 prev;
+
+       spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       if ((prev = *ptr) == old)
+               *ptr = new;
+       spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+       return prev;
+}
+EXPORT_SYMBOL(__cmpxchg_u64);
+
 unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
 {
        unsigned long flags;
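On 32-bit sparc the new __cmpxchg_u64() emulates a 64-bit compare-and-swap under the same hashed spinlocks used by the other atomics, returning the previous value. An illustrative retry loop built on that return convention (a sketch, not production code):

/* Sketch: a compare-and-swap update loop using the "returns previous value"
 * convention of __cmpxchg_u64().
 */
extern u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);

static void add_u64(u64 *counter, u64 delta)
{
	u64 old = __cmpxchg_u64(counter, 0, 0);	/* atomic read: swapping 0 for 0 is a no-op */

	for (;;) {
		u64 prev = __cmpxchg_u64(counter, old, old + delta);

		if (prev == old)
			break;			/* our swap won */
		old = prev;			/* lost the race; retry with the fresh value */
	}
}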
diff --git a/arch/sparc/lib/fls.S b/arch/sparc/lib/fls.S
new file mode 100644 (file)
index 0000000..06b8d30
--- /dev/null
@@ -0,0 +1,67 @@
+/* fls.S: SPARC default fls definition.
+ *
+ * SPARC default fls definition, which follows the same algorithm as
+ * in generic fls(). This function will be boot time patched on T4
+ * and onward.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+       .text
+       .register       %g2, #scratch
+       .register       %g3, #scratch
+ENTRY(fls)
+       brz,pn  %o0, 6f
+        mov    0, %o1
+       sethi   %hi(0xffff0000), %g3
+       mov     %o0, %g2
+       andcc   %o0, %g3, %g0
+       be,pt   %icc, 8f
+        mov    32, %o1
+       sethi   %hi(0xff000000), %g3
+       andcc   %g2, %g3, %g0
+       bne,pt  %icc, 3f
+        sethi  %hi(0xf0000000), %g3
+       sll     %o0, 8, %o0
+1:
+       add     %o1, -8, %o1
+       sra     %o0, 0, %o0
+       mov     %o0, %g2
+2:
+       sethi   %hi(0xf0000000), %g3
+3:
+       andcc   %g2, %g3, %g0
+       bne,pt  %icc, 4f
+        sethi  %hi(0xc0000000), %g3
+       sll     %o0, 4, %o0
+       add     %o1, -4, %o1
+       sra     %o0, 0, %o0
+       mov     %o0, %g2
+4:
+       andcc   %g2, %g3, %g0
+       be,a,pt %icc, 7f
+        sll    %o0, 2, %o0
+5:
+       xnor    %g0, %o0, %o0
+       srl     %o0, 31, %o0
+       sub     %o1, %o0, %o1
+6:
+       jmp     %o7 + 8
+        sra    %o1, 0, %o0
+7:
+       add     %o1, -2, %o1
+       ba,pt   %xcc, 5b
+        sra    %o0, 0, %o0
+8:
+       sll     %o0, 16, %o0
+       sethi   %hi(0xff000000), %g3
+       sra     %o0, 0, %o0
+       mov     %o0, %g2
+       andcc   %g2, %g3, %g0
+       bne,pt  %icc, 2b
+        mov    16, %o1
+       ba,pt   %xcc, 1b
+        sll    %o0, 8, %o0
+ENDPROC(fls)
+EXPORT_SYMBOL(fls)
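For readers of the assembly, the Linux fls() contract it implements is fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32, i.e. one plus the index of the most significant set bit. The equivalent C reference, following the generic algorithm the header comment refers to:

/* C reference for the semantics implemented above. */
static int fls_ref(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
	return r;
}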
diff --git a/arch/sparc/lib/fls64.S b/arch/sparc/lib/fls64.S
new file mode 100644 (file)
index 0000000..c83e22a
--- /dev/null
@@ -0,0 +1,61 @@
+/* fls64.S: SPARC default __fls definition.
+ *
+ * SPARC default __fls definition, which follows the same algorithm as
+ * in generic __fls(). This function will be boot time patched on T4
+ * and onward.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+       .text
+       .register       %g2, #scratch
+       .register       %g3, #scratch
+ENTRY(__fls)
+       mov     -1, %g2
+       sllx    %g2, 32, %g2
+       and     %o0, %g2, %g2
+       brnz,pt %g2, 1f
+        mov    63, %g1
+       sllx    %o0, 32, %o0
+       mov     31, %g1
+1:
+       mov     -1, %g2
+       sllx    %g2, 48, %g2
+       and     %o0, %g2, %g2
+       brnz,pt %g2, 2f
+        mov    -1, %g2
+       sllx    %o0, 16, %o0
+       add     %g1, -16, %g1
+2:
+       mov     -1, %g2
+       sllx    %g2, 56, %g2
+       and     %o0, %g2, %g2
+       brnz,pt %g2, 3f
+        mov    -1, %g2
+       sllx    %o0, 8, %o0
+       add     %g1, -8, %g1
+3:
+       sllx    %g2, 60, %g2
+       and     %o0, %g2, %g2
+       brnz,pt %g2, 4f
+        mov    -1, %g2
+       sllx    %o0, 4, %o0
+       add     %g1, -4, %g1
+4:
+       sllx    %g2, 62, %g2
+       and     %o0, %g2, %g2
+       brnz,pt %g2, 5f
+        mov    -1, %g3
+       sllx    %o0, 2, %o0
+       add     %g1, -2, %g1
+5:
+       mov     0, %g2
+       sllx    %g3, 63, %g3
+       and     %o0, %g3, %o0
+       movre   %o0, 1, %g2
+       sub     %g1, %g2, %g1
+       jmp     %o7+8
+        sra    %g1, 0, %o0
+ENDPROC(__fls)
+EXPORT_SYMBOL(__fls)
index 5335ba3c850ed3acdc074ffe639d3ddac101f2ad..33c0f8bb0f33de0c6beadd3dd8a9bef6253bcb10 100644 (file)
@@ -75,7 +75,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
        if (!(pmd_val(pmd) & _PAGE_VALID))
                return 0;
 
-       if (write && !pmd_write(pmd))
+       if (!pmd_access_permitted(pmd, write))
                return 0;
 
        refs = 0;
@@ -114,7 +114,7 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
        if (!(pud_val(pud) & _PAGE_VALID))
                return 0;
 
-       if (write && !pud_write(pud))
+       if (!pud_access_permitted(pud, write))
                return 0;
 
        refs = 0;
diff --git a/arch/sparc/vdso/.gitignore b/arch/sparc/vdso/.gitignore
new file mode 100644 (file)
index 0000000..ef925b9
--- /dev/null
@@ -0,0 +1,3 @@
+vdso.lds
+vdso-image-*.c
+vdso2c
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
new file mode 100644 (file)
index 0000000..a6615d8
--- /dev/null
@@ -0,0 +1,149 @@
+#
+# Building vDSO images for sparc.
+#
+
+KBUILD_CFLAGS += $(DISABLE_LTO)
+
+VDSO64-$(CONFIG_SPARC64)       := y
+VDSOCOMPAT-$(CONFIG_COMPAT)    := y
+
+# files to link into the vdso
+vobjs-y := vdso-note.o vclock_gettime.o
+
+# files to link into kernel
+obj-y                          += vma.o
+
+# vDSO images to build
+vdso_img-$(VDSO64-y)           += 64
+vdso_img-$(VDSOCOMPAT-y)       += 32
+
+vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
+
+$(obj)/vdso.o: $(obj)/vdso.so
+
+targets += vdso.lds $(vobjs-y)
+
+# Build the vDSO image C files and link them in.
+vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
+vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
+vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
+obj-y += $(vdso_img_objs)
+targets += $(vdso_img_cfiles)
+targets += $(vdso_img_sodbg)
+.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \
+       $(vdso_img-y:%=$(obj)/vdso%.so)
+
+export CPPFLAGS_vdso.lds += -P -C
+
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+                       -Wl,--no-undefined \
+                       -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
+                       $(DISABLE_LTO)
+
+$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+       $(call if_changed,vdso)
+
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+hostprogs-y                    += vdso2c
+
+quiet_cmd_vdso2c = VDSO2C  $@
+define cmd_vdso2c
+       $(obj)/vdso2c $< $(<:%.dbg=%) $@
+endef
+
+$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
+       $(call if_changed,vdso2c)
+
+#
+# Don't omit frame pointers for ease of userspace debugging, but do
+# optimize sibling calls.
+#
+CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \
+       -m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \
+       -ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \
+       $(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \
+       -foptimize-sibling-calls -DBUILD_VDSO
+
+$(vobjs): KBUILD_CFLAGS += $(CFL)
+
+#
+# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+#
+CFLAGS_REMOVE_vdso-note.o = -pg
+CFLAGS_REMOVE_vclock_gettime.o = -pg
+
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg
+       $(call if_changed,objcopy)
+
+CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1
+
+# This makes sure the $(obj) subdirectory exists even though vdso32/
+# is not a kbuild sub-make subdirectory
+override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
+
+targets += vdso32/vdso32.lds
+targets += vdso32/vdso-note.o
+targets += vdso32/vclock_gettime.o
+
+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
+$(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+$(obj)/vdso32.so.dbg: asflags-$(CONFIG_SPARC64) += -m32
+
+KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7
+KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += -mv8plus
+$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+$(obj)/vdso32.so.dbg: FORCE \
+                       $(obj)/vdso32/vdso32.lds \
+                       $(obj)/vdso32/vclock_gettime.o \
+                       $(obj)/vdso32/vdso-note.o
+               $(call  if_changed,vdso)
+
+#
+# The DSO images are built using a special linker script.
+#
+quiet_cmd_vdso = VDSO    $@
+      cmd_vdso = $(CC) -nostdlib -o $@ \
+                      $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+                      -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
+
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+       $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic
+GCOV_PROFILE := n
+
+#
+# Install the unstripped copies of vdso*.so.  If our toolchain supports
+# build-id, install .build-id links as well.
+#
+quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
+define cmd_vdso_install
+       cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+       if readelf -n $< |grep -q 'Build ID'; then \
+         buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+         first=`echo $$buildid | cut -b-2`; \
+         last=`echo $$buildid | cut -b3-`; \
+         mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+         ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+       fi
+endef
+
+vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
+
+$(MODLIB)/vdso: FORCE
+       @mkdir -p $(MODLIB)/vdso
+
+$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
+       $(call cmd,vdso_install)
+
+PHONY += vdso_install $(vdso_img_insttargets)
+vdso_install: $(vdso_img_insttargets) FORCE
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
new file mode 100644 (file)
index 0000000..3feb3d9
--- /dev/null
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2006 Andi Kleen, SUSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
+ * Fast user context implementation of clock_gettime, gettimeofday, and time.
+ *
+ * The code should have no internal unresolved relocations.
+ * Check with readelf after changing.
+ * Also alternative() doesn't work.
+ */
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+/* Disable profiling for userspace code: */
+#ifndef        DISABLE_BRANCH_PROFILING
+#define        DISABLE_BRANCH_PROFILING
+#endif
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/unistd.h>
+#include <asm/timex.h>
+#include <asm/clocksource.h>
+#include <asm/vvar.h>
+
+#undef TICK_PRIV_BIT
+#ifdef CONFIG_SPARC64
+#define        TICK_PRIV_BIT   (1UL << 63)
+#else
+#define        TICK_PRIV_BIT   (1ULL << 63)
+#endif
+
+#define SYSCALL_STRING                                                 \
+       "ta     0x6d;"                                                  \
+       "sub    %%g0, %%o0, %%o0;"                                      \
+
+#define SYSCALL_CLOBBERS                                               \
+       "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",                 \
+       "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",           \
+       "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",         \
+       "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",         \
+       "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",         \
+       "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",         \
+       "cc", "memory"
+
+/*
+ * Compute the vvar page's address in the process address space, and return it
+ * as a pointer to the vvar_data.
+ */
+static notrace noinline struct vvar_data *
+get_vvar_data(void)
+{
+       unsigned long ret;
+
+       /*
+        * vdso data page is the first vDSO page so grab the return address
+        * and move up a page to get to the data page.
+        */
+       ret = (unsigned long)__builtin_return_address(0);
+       ret &= ~(8192 - 1);
+       ret -= 8192;
+
+       return (struct vvar_data *) ret;
+}
+
+static notrace long
+vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+       register long num __asm__("g1") = __NR_clock_gettime;
+       register long o0 __asm__("o0") = clock;
+       register long o1 __asm__("o1") = (long) ts;
+
+       __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
+                            "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
+       return o0;
+}
+
+static notrace __always_inline long
+vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+       register long num __asm__("g1") = __NR_gettimeofday;
+       register long o0 __asm__("o0") = (long) tv;
+       register long o1 __asm__("o1") = (long) tz;
+
+       __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
+                            "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
+       return o0;
+}
+
+#ifdef CONFIG_SPARC64
+static notrace noinline u64
+vread_tick(void) {
+       u64     ret;
+
+       __asm__ __volatile__("rd        %%asr24, %0 \n"
+                            ".section  .vread_tick_patch, \"ax\" \n"
+                            "rd        %%tick, %0 \n"
+                            ".previous \n"
+                            : "=&r" (ret));
+       return ret & ~TICK_PRIV_BIT;
+}
+#else
+static notrace noinline u64
+vread_tick(void)
+{
+       unsigned int lo, hi;
+
+       __asm__ __volatile__("rd        %%asr24, %%g1\n\t"
+                            "srlx      %%g1, 32, %1\n\t"
+                            "srl       %%g1, 0, %0\n"
+                            ".section  .vread_tick_patch, \"ax\" \n"
+                            "rd        %%tick, %%g1\n"
+                            ".previous \n"
+                            : "=&r" (lo), "=&r" (hi)
+                            :
+                            : "g1");
+       return lo | ((u64)hi << 32);
+}
+#endif
+
+static notrace inline u64
+vgetsns(struct vvar_data *vvar)
+{
+       u64 v;
+       u64 cycles;
+
+       cycles = vread_tick();
+       v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
+       return v * vvar->clock.mult;
+}
+
+static notrace noinline int
+do_realtime(struct vvar_data *vvar, struct timespec *ts)
+{
+       unsigned long seq;
+       u64 ns;
+
+       ts->tv_nsec = 0;
+       do {
+               seq = vvar_read_begin(vvar);
+               ts->tv_sec = vvar->wall_time_sec;
+               ns = vvar->wall_time_snsec;
+               ns += vgetsns(vvar);
+               ns >>= vvar->clock.shift;
+       } while (unlikely(vvar_read_retry(vvar, seq)));
+
+       timespec_add_ns(ts, ns);
+
+       return 0;
+}
+
+static notrace noinline int
+do_monotonic(struct vvar_data *vvar, struct timespec *ts)
+{
+       unsigned long seq;
+       u64 ns;
+
+       ts->tv_nsec = 0;
+       do {
+               seq = vvar_read_begin(vvar);
+               ts->tv_sec = vvar->monotonic_time_sec;
+               ns = vvar->monotonic_time_snsec;
+               ns += vgetsns(vvar);
+               ns >>= vvar->clock.shift;
+       } while (unlikely(vvar_read_retry(vvar, seq)));
+
+       timespec_add_ns(ts, ns);
+
+       return 0;
+}
+
+static notrace noinline int
+do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
+{
+       unsigned long seq;
+
+       do {
+               seq = vvar_read_begin(vvar);
+               ts->tv_sec = vvar->wall_time_coarse_sec;
+               ts->tv_nsec = vvar->wall_time_coarse_nsec;
+       } while (unlikely(vvar_read_retry(vvar, seq)));
+       return 0;
+}
+
+static notrace noinline int
+do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
+{
+       unsigned long seq;
+
+       do {
+               seq = vvar_read_begin(vvar);
+               ts->tv_sec = vvar->monotonic_time_coarse_sec;
+               ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
+       } while (unlikely(vvar_read_retry(vvar, seq)));
+
+       return 0;
+}
+
+notrace int
+__vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+{
+       struct vvar_data *vvd = get_vvar_data();
+
+       switch (clock) {
+       case CLOCK_REALTIME:
+               if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+                       break;
+               return do_realtime(vvd, ts);
+       case CLOCK_MONOTONIC:
+               if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+                       break;
+               return do_monotonic(vvd, ts);
+       case CLOCK_REALTIME_COARSE:
+               return do_realtime_coarse(vvd, ts);
+       case CLOCK_MONOTONIC_COARSE:
+               return do_monotonic_coarse(vvd, ts);
+       }
+       /*
+        * Unknown clock ID? Fall back to the syscall.
+        */
+       return vdso_fallback_gettime(clock, ts);
+}
+int
+clock_gettime(clockid_t, struct timespec *)
+       __attribute__((weak, alias("__vdso_clock_gettime")));
+
+notrace int
+__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+       struct vvar_data *vvd = get_vvar_data();
+
+       if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
+               if (likely(tv != NULL)) {
+                       union tstv_t {
+                               struct timespec ts;
+                               struct timeval tv;
+                       } *tstv = (union tstv_t *) tv;
+                       do_realtime(vvd, &tstv->ts);
+                       /*
+                        * Assign before dividing to ensure that the division is
+                        * done in the type of tv_usec, not tv_nsec.
+                        *
+                        * There cannot be > 1 billion usec in a second:
+                        * do_realtime() has already distributed such overflow
+                        * into tv_sec.  So we can assign it to an int safely.
+                        */
+                       tstv->tv.tv_usec = tstv->ts.tv_nsec;
+                       tstv->tv.tv_usec /= 1000;
+               }
+               if (unlikely(tz != NULL)) {
+                       /* Avoid memcpy. Some old compilers fail to inline it */
+                       tz->tz_minuteswest = vvd->tz_minuteswest;
+                       tz->tz_dsttime = vvd->tz_dsttime;
+               }
+               return 0;
+       }
+       return vdso_fallback_gettimeofday(tv, tz);
+}
+int
+gettimeofday(struct timeval *, struct timezone *)
+       __attribute__((weak, alias("__vdso_gettimeofday")));
diff --git a/arch/sparc/vdso/vdso-layout.lds.S b/arch/sparc/vdso/vdso-layout.lds.S
new file mode 100644 (file)
index 0000000..f2c83ab
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Linker script for vDSO.  This is an ELF shared object prelinked to
+ * its virtual address, and with only one read-only segment.
+ * This script controls its layout.
+ */
+
+#if defined(BUILD_VDSO64)
+# define SHDR_SIZE 64
+#elif defined(BUILD_VDSO32)
+# define SHDR_SIZE 40
+#else
+# error unknown VDSO target
+#endif
+
+#define NUM_FAKE_SHDRS 7
+
+SECTIONS
+{
+       /*
+        * User/kernel shared data is before the vDSO.  This may be a little
+        * uglier than putting it after the vDSO, but it avoids issues with
+        * non-allocatable things that dangle past the end of the PT_LOAD
+        * segment. Page size is 8192 for both 64-bit and 32-bit vdso binaries
+        */
+
+       vvar_start = . -8192;
+       vvar_data = vvar_start;
+
+       . = SIZEOF_HEADERS;
+
+       .hash           : { *(.hash) }                  :text
+       .gnu.hash       : { *(.gnu.hash) }
+       .dynsym         : { *(.dynsym) }
+       .dynstr         : { *(.dynstr) }
+       .gnu.version    : { *(.gnu.version) }
+       .gnu.version_d  : { *(.gnu.version_d) }
+       .gnu.version_r  : { *(.gnu.version_r) }
+
+       .dynamic        : { *(.dynamic) }               :text   :dynamic
+
+       .rodata         : {
+               *(.rodata*)
+               *(.data*)
+               *(.sdata*)
+               *(.got.plt) *(.got)
+               *(.gnu.linkonce.d.*)
+               *(.bss*)
+               *(.dynbss*)
+               *(.gnu.linkonce.b.*)
+
+               /*
+                * Ideally this would live in a C file: kept in here for
+                * compatibility with x86-64.
+                */
+               VDSO_FAKE_SECTION_TABLE_START = .;
+               . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
+               VDSO_FAKE_SECTION_TABLE_END = .;
+       }                                               :text
+
+       .fake_shstrtab  : { *(.fake_shstrtab) }         :text
+
+
+       .note           : { *(.note.*) }                :text   :note
+
+       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
+       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
+
+
+       /*
+        * Text is well-separated from actual data: there's plenty of
+        * stuff that isn't used at runtime in between.
+        */
+
+       .text           : { *(.text*) }                 :text   =0x90909090,
+
+       .vread_tick_patch : {
+               vread_tick_patch_start = .;
+               *(.vread_tick_patch)
+               vread_tick_patch_end = .;
+       }
+
+       /DISCARD/ : {
+               *(.discard)
+               *(.discard.*)
+               *(__bug_table)
+       }
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME        0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+       text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+       dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
+       note            PT_NOTE         FLAGS(4);               /* PF_R */
+       eh_frame_hdr    PT_GNU_EH_FRAME;
+}
diff --git a/arch/sparc/vdso/vdso-note.S b/arch/sparc/vdso/vdso-note.S
new file mode 100644 (file)
index 0000000..79a071e
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+       .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/sparc/vdso/vdso.lds.S b/arch/sparc/vdso/vdso.lds.S
new file mode 100644 (file)
index 0000000..f3caa29
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Linker script for 64-bit vDSO.
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#define BUILD_VDSO64
+
+#include "vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+       LINUX_2.6 {
+       global:
+               clock_gettime;
+               __vdso_clock_gettime;
+               gettimeofday;
+               __vdso_gettimeofday;
+       local: *;
+       };
+}
diff --git a/arch/sparc/vdso/vdso2c.c b/arch/sparc/vdso/vdso2c.c
new file mode 100644 (file)
index 0000000..9f5b1cd
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * vdso2c - A vdso image preparation tool
+ * Copyright (c) 2014 Andy Lutomirski and others
+ * Licensed under the GPL v2
+ *
+ * vdso2c requires stripped and unstripped input.  It would be trivial
+ * to fully strip the input in here, but, for reasons described below,
+ * we need to write a section table.  Doing this is more or less
+ * equivalent to dropping all non-allocatable sections, but it's
+ * easier to let objcopy handle that instead of doing it ourselves.
+ * If we ever need to do something fancier than what objcopy provides,
+ * it would be straightforward to add here.
+ *
+ * We keep a section table for a few reasons:
+ *
+ * Binutils has issues debugging the vDSO: it reads the section table to
+ * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
+ * would break build-id if we removed the section table.  Binutils
+ * also requires that shstrndx != 0.  See:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
+ *
+ * elfutils might not look for PT_NOTE if there is a section table at
+ * all.  I don't know whether this matters for any practical purpose.
+ *
+ * For simplicity, rather than hacking up a partial section table, we
+ * just write a mostly complete one.  We omit non-dynamic symbols,
+ * though, since they're rather large.
+ *
+ * Once binutils gets fixed, we might be able to drop this for all but
+ * the 64-bit vdso, since build-id only works in kernel RPMs, and
+ * systems that update to new enough kernel RPMs will likely update
+ * binutils in sync.  build-id has never worked for home-built kernel
+ * RPMs without manual symlinking, and I suspect that no one ever does
+ * that.
+ */
+
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <err.h>
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <tools/be_byteshift.h>
+
+#include <linux/elf.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+const char *outfilename;
+
+/* Symbols that we need in vdso2c. */
+enum {
+       sym_vvar_start,
+       sym_VDSO_FAKE_SECTION_TABLE_START,
+       sym_VDSO_FAKE_SECTION_TABLE_END,
+       sym_vread_tick,
+       sym_vread_tick_patch_start,
+       sym_vread_tick_patch_end
+};
+
+struct vdso_sym {
+       const char *name;
+       int export;
+};
+
+struct vdso_sym required_syms[] = {
+       [sym_vvar_start] = {"vvar_start", 1},
+       [sym_VDSO_FAKE_SECTION_TABLE_START] = {
+               "VDSO_FAKE_SECTION_TABLE_START", 0
+       },
+       [sym_VDSO_FAKE_SECTION_TABLE_END] = {
+               "VDSO_FAKE_SECTION_TABLE_END", 0
+       },
+       [sym_vread_tick] = {"vread_tick", 1},
+       [sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1},
+       [sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1}
+};
+
+__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
+static void fail(const char *format, ...)
+{
+       va_list ap;
+
+       va_start(ap, format);
+       fprintf(stderr, "Error: ");
+       vfprintf(stderr, format, ap);
+       if (outfilename)
+               unlink(outfilename);
+       exit(1);
+       va_end(ap);
+}
+
+/*
+ * Evil macros for big-endian reads and writes
+ */
+#define GBE(x, bits, ifnot)                                            \
+       __builtin_choose_expr(                                          \
+               (sizeof(*(x)) == bits/8),                               \
+               (__typeof__(*(x)))get_unaligned_be##bits(x), ifnot)
+
+#define LAST_GBE(x)                                                    \
+       __builtin_choose_expr(sizeof(*(x)) == 1, *(x), (void)(0))
+
+#define GET_BE(x)                                                      \
+       GBE(x, 64, GBE(x, 32, GBE(x, 16, LAST_GBE(x))))
+
+#define PBE(x, val, bits, ifnot)                                       \
+       __builtin_choose_expr(                                          \
+               (sizeof(*(x)) == bits/8),                               \
+               put_unaligned_be##bits((val), (x)), ifnot)
+
+#define LAST_PBE(x, val)                                               \
+       __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), (void)(0))
+
+#define PUT_BE(x, val)                                 \
+       PBE(x, val, 64, PBE(x, val, 32, PBE(x, val, 16, LAST_PBE(x, val))))
+
+#define NSYMS ARRAY_SIZE(required_syms)
+
+#define BITSFUNC3(name, bits, suffix) name##bits##suffix
+#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
+#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, )
+
+#define INT_BITS BITSFUNC2(int, ELF_BITS, _t)
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+#define ELF_BITS 64
+#include "vdso2c.h"
+#undef ELF_BITS
+
+#define ELF_BITS 32
+#include "vdso2c.h"
+#undef ELF_BITS
+
+static void go(void *raw_addr, size_t raw_len,
+              void *stripped_addr, size_t stripped_len,
+              FILE *outfile, const char *name)
+{
+       Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr;
+
+       if (hdr->e_ident[EI_CLASS] == ELFCLASS64) {
+               go64(raw_addr, raw_len, stripped_addr, stripped_len,
+                    outfile, name);
+       } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) {
+               go32(raw_addr, raw_len, stripped_addr, stripped_len,
+                    outfile, name);
+       } else {
+               fail("unknown ELF class\n");
+       }
+}
+
+static void map_input(const char *name, void **addr, size_t *len, int prot)
+{
+       off_t tmp_len;
+
+       int fd = open(name, O_RDONLY);
+
+       if (fd == -1)
+               err(1, "%s", name);
+
+       tmp_len = lseek(fd, 0, SEEK_END);
+       if (tmp_len == (off_t)-1)
+               err(1, "lseek");
+       *len = (size_t)tmp_len;
+
+       *addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0);
+       if (*addr == MAP_FAILED)
+               err(1, "mmap");
+
+       close(fd);
+}
+
+int main(int argc, char **argv)
+{
+       size_t raw_len, stripped_len;
+       void *raw_addr, *stripped_addr;
+       FILE *outfile;
+       char *name, *tmp;
+       int namelen;
+
+       if (argc != 4) {
+               printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n");
+               return 1;
+       }
+
+       /*
+        * Figure out the struct name.  If we're writing to a .so file,
+        * generate raw output insted.
+        * generate raw output instead.
+       name = strdup(argv[3]);
+       namelen = strlen(name);
+       if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
+               name = NULL;
+       } else {
+               tmp = strrchr(name, '/');
+               if (tmp)
+                       name = tmp + 1;
+               tmp = strchr(name, '.');
+               if (tmp)
+                       *tmp = '\0';
+               for (tmp = name; *tmp; tmp++)
+                       if (*tmp == '-')
+                               *tmp = '_';
+       }
+
+       map_input(argv[1], &raw_addr, &raw_len, PROT_READ);
+       map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ);
+
+       outfilename = argv[3];
+       outfile = fopen(outfilename, "w");
+       if (!outfile)
+               err(1, "%s", argv[2]);
+
+       go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name);
+
+       munmap(raw_addr, raw_len);
+       munmap(stripped_addr, stripped_len);
+       fclose(outfile);
+
+       return 0;
+}
diff --git a/arch/sparc/vdso/vdso2c.h b/arch/sparc/vdso/vdso2c.h
new file mode 100644 (file)
index 0000000..808decb
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * This file is included up to twice from vdso2c.c.  It generates code for
+ * 32-bit and 64-bit vDSOs.  We will eventually need both for 64-bit builds,
+ * since 32-bit vDSOs will then be built for 32-bit userspace.
+ */
+
+static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+                        void *stripped_addr, size_t stripped_len,
+                        FILE *outfile, const char *name)
+{
+       int found_load = 0;
+       unsigned long load_size = -1;  /* Work around bogus warning */
+       unsigned long mapping_size;
+       int i;
+       unsigned long j;
+
+       ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
+       ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
+       ELF(Dyn) *dyn = 0, *dyn_end = 0;
+       INT_BITS syms[NSYMS] = {};
+
+       ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff));
+
+       /* Walk the segment table. */
+       for (i = 0; i < GET_BE(&hdr->e_phnum); i++) {
+               if (GET_BE(&pt[i].p_type) == PT_LOAD) {
+                       if (found_load)
+                               fail("multiple PT_LOAD segs\n");
+
+                       if (GET_BE(&pt[i].p_offset) != 0 ||
+                           GET_BE(&pt[i].p_vaddr) != 0)
+                               fail("PT_LOAD in wrong place\n");
+
+                       if (GET_BE(&pt[i].p_memsz) != GET_BE(&pt[i].p_filesz))
+                               fail("cannot handle memsz != filesz\n");
+
+                       load_size = GET_BE(&pt[i].p_memsz);
+                       found_load = 1;
+               } else if (GET_BE(&pt[i].p_type) == PT_DYNAMIC) {
+                       dyn = raw_addr + GET_BE(&pt[i].p_offset);
+                       dyn_end = raw_addr + GET_BE(&pt[i].p_offset) +
+                               GET_BE(&pt[i].p_memsz);
+               }
+       }
+       if (!found_load)
+               fail("no PT_LOAD seg\n");
+
+       if (stripped_len < load_size)
+               fail("stripped input is too short\n");
+
+       /* Walk the dynamic table */
+       for (i = 0; dyn + i < dyn_end &&
+                    GET_BE(&dyn[i].d_tag) != DT_NULL; i++) {
+               typeof(dyn[i].d_tag) tag = GET_BE(&dyn[i].d_tag);
+               typeof(dyn[i].d_un.d_val) val = GET_BE(&dyn[i].d_un.d_val);
+
+               if ((tag == DT_RELSZ || tag == DT_RELASZ) && (val != 0))
+                       fail("vdso image contains dynamic relocations\n");
+       }
+
+       /* Walk the section table */
+       for (i = 0; i < GET_BE(&hdr->e_shnum); i++) {
+               ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) +
+                       GET_BE(&hdr->e_shentsize) * i;
+               if (GET_BE(&sh->sh_type) == SHT_SYMTAB)
+                       symtab_hdr = sh;
+       }
+
+       if (!symtab_hdr)
+               fail("no symbol table\n");
+
+       strtab_hdr = raw_addr + GET_BE(&hdr->e_shoff) +
+               GET_BE(&hdr->e_shentsize) * GET_BE(&symtab_hdr->sh_link);
+
+       /* Walk the symbol table */
+       for (i = 0;
+            i < GET_BE(&symtab_hdr->sh_size) / GET_BE(&symtab_hdr->sh_entsize);
+            i++) {
+               int k;
+
+               ELF(Sym) *sym = raw_addr + GET_BE(&symtab_hdr->sh_offset) +
+                       GET_BE(&symtab_hdr->sh_entsize) * i;
+               const char *name = raw_addr + GET_BE(&strtab_hdr->sh_offset) +
+                       GET_BE(&sym->st_name);
+
+               for (k = 0; k < NSYMS; k++) {
+                       if (!strcmp(name, required_syms[k].name)) {
+                               if (syms[k]) {
+                                       fail("duplicate symbol %s\n",
+                                            required_syms[k].name);
+                               }
+
+                               /*
+                                * Careful: we use negative addresses, but
+                                * st_value is unsigned, so we rely
+                                * on syms[k] being a signed type of the
+                                * correct width.
+                                */
+                               syms[k] = GET_BE(&sym->st_value);
+                       }
+               }
+       }
+
+       /* Validate mapping addresses. */
+       if (syms[sym_vvar_start] % 8192)
+               fail("vvar_start must be a multiple of 8192\n");
+
+       if (!name) {
+               fwrite(stripped_addr, stripped_len, 1, outfile);
+               return;
+       }
+
+       mapping_size = (stripped_len + 8191) / 8192 * 8192;
+
+       fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
+       fprintf(outfile, "#include <linux/cache.h>\n");
+       fprintf(outfile, "#include <asm/vdso.h>\n");
+       fprintf(outfile, "\n");
+       fprintf(outfile,
+               "static unsigned char raw_data[%lu] __ro_after_init __aligned(8192)= {",
+               mapping_size);
+       for (j = 0; j < stripped_len; j++) {
+               if (j % 10 == 0)
+                       fprintf(outfile, "\n\t");
+               fprintf(outfile, "0x%02X, ",
+                       (int)((unsigned char *)stripped_addr)[j]);
+       }
+       fprintf(outfile, "\n};\n\n");
+
+       fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name);
+       fprintf(outfile, "\t.data = raw_data,\n");
+       fprintf(outfile, "\t.size = %lu,\n", mapping_size);
+       for (i = 0; i < NSYMS; i++) {
+               if (required_syms[i].export && syms[i])
+                       fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
+                               required_syms[i].name, (int64_t)syms[i]);
+       }
+       fprintf(outfile, "};\n");
+}
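The generator emits a C file embedding the stripped image plus a struct vdso_image describing it; only exported, non-zero symbols receive .sym_ initializers. A sketch of roughly what the generated vdso-image-64.c looks like; every size and offset below is invented for illustration:

/* AUTOMATICALLY GENERATED -- DO NOT EDIT */
/* (illustrative shape of the generated file; values are made up) */

#include <linux/cache.h>
#include <asm/vdso.h>

static unsigned char raw_data[8192] __ro_after_init __aligned(8192) = {
	0x7F, 0x45, 0x4C, 0x46, /* ...the rest of the stripped vDSO image... */
};

const struct vdso_image vdso_image_64_builtin = {
	.data = raw_data,
	.size = 8192,
	.sym_vvar_start = -8192,
	.sym_vread_tick = 2048,
	.sym_vread_tick_patch_start = 4096,
	.sym_vread_tick_patch_end = 4104,
};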
diff --git a/arch/sparc/vdso/vdso32/.gitignore b/arch/sparc/vdso/vdso32/.gitignore
new file mode 100644 (file)
index 0000000..e45fba9
--- /dev/null
@@ -0,0 +1 @@
+vdso32.lds
diff --git a/arch/sparc/vdso/vdso32/vclock_gettime.c b/arch/sparc/vdso/vdso32/vclock_gettime.c
new file mode 100644 (file)
index 0000000..026abb3
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define        BUILD_VDSO32
+
+#ifndef        CONFIG_CC_OPTIMIZE_FOR_SIZE
+#undef CONFIG_OPTIMIZE_INLINING
+#endif
+
+#ifdef CONFIG_SPARC64
+
+/*
+ * In case of a 32-bit vDSO for a 64-bit kernel, fake a 32-bit kernel
+ * configuration.
+ */
+#undef CONFIG_64BIT
+#undef CONFIG_SPARC64
+#define        BUILD_VDSO32_64
+#define        CONFIG_32BIT
+#undef CONFIG_QUEUED_RWLOCKS
+#undef CONFIG_QUEUED_SPINLOCKS
+
+#endif
+
+#include "../vclock_gettime.c"
diff --git a/arch/sparc/vdso/vdso32/vdso-note.S b/arch/sparc/vdso/vdso32/vdso-note.S
new file mode 100644 (file)
index 0000000..e234983
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO
+ * text. Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+       .long   LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/sparc/vdso/vdso32/vdso32.lds.S b/arch/sparc/vdso/vdso32/vdso32.lds.S
new file mode 100644 (file)
index 0000000..53575ee
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Linker script for sparc32 vDSO
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#define        BUILD_VDSO32
+#include "../vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+       LINUX_2.6 {
+       global:
+               clock_gettime;
+               __vdso_clock_gettime;
+               gettimeofday;
+               __vdso_gettimeofday;
+       local: *;
+       };
+}
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
new file mode 100644 (file)
index 0000000..0a6f500
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ * Set up the VMAs to tell the VM about the vDSO.
+ * Copyright 2007 Andi Kleen, SUSE Labs.
+ * Subject to the GPL, v.2
+ */
+
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/random.h>
+#include <linux/elf.h>
+#include <asm/vdso.h>
+#include <asm/vvar.h>
+#include <asm/page.h>
+
+unsigned int __read_mostly vdso_enabled = 1;
+
+static struct vm_special_mapping vvar_mapping = {
+       .name = "[vvar]"
+};
+
+#ifdef CONFIG_SPARC64
+static struct vm_special_mapping vdso_mapping64 = {
+       .name = "[vdso]"
+};
+#endif
+
+#ifdef CONFIG_COMPAT
+static struct vm_special_mapping vdso_mapping32 = {
+       .name = "[vdso]"
+};
+#endif
+
+struct vvar_data *vvar_data;
+
+#define        SAVE_INSTR_SIZE 4
+
+/*
+ * Allocate pages for the vdso and vvar, and copy in the vdso text from the
+ * kernel image.
+ */
+int __init init_vdso_image(const struct vdso_image *image,
+               struct vm_special_mapping *vdso_mapping)
+{
+       int i;
+       struct page *dp, **dpp = NULL;
+       int dnpages = 0;
+       struct page *cp, **cpp = NULL;
+       int cnpages = (image->size) / PAGE_SIZE;
+
+       /*
+        * First, the vdso text.  This is initialied data, an integral number of
+        * First, the vdso text.  This is initialized data, an integral number of
+        */
+       if (WARN_ON(image->size % PAGE_SIZE != 0))
+               goto oom;
+
+       cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
+       vdso_mapping->pages = cpp;
+
+       if (!cpp)
+               goto oom;
+
+       if (vdso_fix_stick) {
+               /*
+                * If the system uses %tick instead of %stick, patch the VDSO
+                * with instruction reading %tick instead of %stick.
+                */
+               unsigned int j, k = SAVE_INSTR_SIZE;
+               unsigned char *data = image->data;
+
+               for (j = image->sym_vread_tick_patch_start;
+                    j < image->sym_vread_tick_patch_end; j++) {
+
+                       data[image->sym_vread_tick + k] = data[j];
+                       k++;
+               }
+       }
+
+       for (i = 0; i < cnpages; i++) {
+               cp = alloc_page(GFP_KERNEL);
+               if (!cp)
+                       goto oom;
+               cpp[i] = cp;
+               copy_page(page_address(cp), image->data + i * PAGE_SIZE);
+       }
+
+       /*
+        * Now the vvar page.  This is uninitialized data.
+        */
+
+       if (vvar_data == NULL) {
+               dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
+               if (WARN_ON(dnpages != 1))
+                       goto oom;
+               dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
+               vvar_mapping.pages = dpp;
+
+               if (!dpp)
+                       goto oom;
+
+               dp = alloc_page(GFP_KERNEL);
+               if (!dp)
+                       goto oom;
+
+               dpp[0] = dp;
+               vvar_data = page_address(dp);
+               memset(vvar_data, 0, PAGE_SIZE);
+
+               vvar_data->seq = 0;
+       }
+
+       return 0;
+ oom:
+       if (cpp != NULL) {
+               for (i = 0; i < cnpages; i++) {
+                       if (cpp[i] != NULL)
+                               __free_page(cpp[i]);
+               }
+               kfree(cpp);
+               vdso_mapping->pages = NULL;
+       }
+
+       if (dpp != NULL) {
+               for (i = 0; i < dnpages; i++) {
+                       if (dpp[i] != NULL)
+                               __free_page(dpp[i]);
+               }
+               kfree(dpp);
+               vvar_mapping.pages = NULL;
+       }
+
+       pr_warn("Cannot allocate vdso\n");
+       vdso_enabled = 0;
+       return -ENOMEM;
+}
+
+static int __init init_vdso(void)
+{
+       int err = 0;
+#ifdef CONFIG_SPARC64
+       err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
+       if (err)
+               return err;
+#endif
+
+#ifdef CONFIG_COMPAT
+       err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
+#endif
+       return err;
+
+}
+subsys_initcall(init_vdso);
+
+struct linux_binprm;
+
+/* Shuffle the vdso up a bit, randomly. */
+static unsigned long vdso_addr(unsigned long start, unsigned int len)
+{
+       unsigned int offset;
+
+       /* This loses some more bits than a modulo, but is cheaper */
+       offset = get_random_int() & (PTRS_PER_PTE - 1);
+       return start + (offset << PAGE_SHIFT);
+}
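As a hedged illustration of the page-granular randomization above (all values hypothetical; PTRS_PER_PTE and PAGE_SHIFT differ between configurations), a small stand-alone sketch of the arithmetic:

    /* Sketch only: stand-ins for get_unmapped_area() and get_random_int(). */
    #include <stdio.h>

    int main(void)
    {
            unsigned long start  = 0xfff0000000UL;      /* hypothetical unmapped hole */
            unsigned long offset = 0x1a3 & (512 - 1);   /* random page offset, PTRS_PER_PTE assumed 512 */

            /* PAGE_SHIFT assumed 13: prints 0xfff0346000 */
            printf("randomized vdso text at %#lx\n", start + (offset << 13));
            return 0;
    }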
+
+static int map_vdso(const struct vdso_image *image,
+               struct vm_special_mapping *vdso_mapping)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long text_start, addr = 0;
+       int ret = 0;
+
+       down_write(&mm->mmap_sem);
+
+       /*
+        * First, get an unmapped region; then randomize it and make sure that
+        * region is free.
+        */
+       if (current->flags & PF_RANDOMIZE) {
+               addr = get_unmapped_area(NULL, 0,
+                                        image->size - image->sym_vvar_start,
+                                        0, 0);
+               if (IS_ERR_VALUE(addr)) {
+                       ret = addr;
+                       goto up_fail;
+               }
+               addr = vdso_addr(addr, image->size - image->sym_vvar_start);
+       }
+       addr = get_unmapped_area(NULL, addr,
+                                image->size - image->sym_vvar_start, 0, 0);
+       if (IS_ERR_VALUE(addr)) {
+               ret = addr;
+               goto up_fail;
+       }
+
+       text_start = addr - image->sym_vvar_start;
+       current->mm->context.vdso = (void __user *)text_start;
+
+       /*
+        * MAYWRITE to allow gdb to COW and set breakpoints
+        */
+       vma = _install_special_mapping(mm,
+                                      text_start,
+                                      image->size,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                      vdso_mapping);
+
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto up_fail;
+       }
+
+       vma = _install_special_mapping(mm,
+                                      addr,
+                                      -image->sym_vvar_start,
+                                      VM_READ|VM_MAYREAD,
+                                      &vvar_mapping);
+
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               do_munmap(mm, text_start, image->size, NULL);
+       }
+
+up_fail:
+       if (ret)
+               current->mm->context.vdso = NULL;
+
+       up_write(&mm->mmap_sem);
+       return ret;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+
+       if (!vdso_enabled)
+               return 0;
+
+#if defined CONFIG_COMPAT
+       if (!(is_32bit_task()))
+               return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
+       else
+               return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
+#else
+               return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
+#endif
+
+}
+
+static __init int vdso_setup(char *s)
+{
+       int err;
+       unsigned long val;
+
+       err = kstrtoul(s, 10, &val);
+       vdso_enabled = val;
+       return err;
+}
+__setup("vdso=", vdso_setup);
index 2a26cc4fefc27fda65d15be3b3a0b719231ad609..adfa21b18488f215b7b56cfc43daa52877c77dd3 100644 (file)
@@ -475,7 +475,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_huge_page(pmd)     pte_huge(pmd_pte(pmd))
 #define pmd_mkhuge(pmd)                pte_pmd(pte_mkhuge(pmd_pte(pmd)))
-#define __HAVE_ARCH_PMD_WRITE
 
 #define pfn_pmd(pfn, pgprot)   pte_pmd(pfn_pte((pfn), (pgprot)))
 #define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
index b11d5fcd2c41219fb9a9929d90f352177db67a7c..635a0a4596f00c5020060b47cf78483750639336 100644 (file)
@@ -29,12 +29,6 @@ static inline int cpu_to_node(int cpu)
        return cpu_2_node[cpu];
 }
 
-/*
- * Returns the number of the node containing Node 'node'.
- * This architecture is flat, so it is a pretty simple function!
- */
-#define parent_node(node) (node)
-
 /* Returns a bitmask of CPUs on Node 'node'. */
 static inline const struct cpumask *cpumask_of_node(int node)
 {
index d9280482a2f89c7c559b76448c6da8358ccda82c..c68add8df3aef6232976dccbb2399270495e5814 100644 (file)
@@ -10,7 +10,6 @@ config UML
        select HAVE_DEBUG_KMEMLEAK
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
-       select GENERIC_IO
        select GENERIC_CLOCKEVENTS
        select HAVE_GCC_PLUGINS
        select TTY # Needed for line.c
index df3276d6bfe33b48ff30c0d672c3029ed650678b..8eed3f94bfc774de5e3f344590f8889a999dea9c 100644 (file)
@@ -1804,14 +1804,20 @@ config X86_SMAP
          If unsure, say Y.
 
 config X86_INTEL_UMIP
-       def_bool n
+       def_bool y
        depends on CPU_SUP_INTEL
        prompt "Intel User Mode Instruction Prevention" if EXPERT
        ---help---
          The User Mode Instruction Prevention (UMIP) is a security
          feature in newer Intel processors. If enabled, a general
-         protection fault is issued if the instructions SGDT, SLDT,
-         SIDT, SMSW and STR are executed in user mode.
+         protection fault is issued if the SGDT, SLDT, SIDT, SMSW
+         or STR instructions are executed in user mode. These instructions
+         unnecessarily expose information about the hardware state.
+
+         The vast majority of applications do not use these instructions.
+         For the very few that do, software emulation is provided in
+         specific cases in protected and virtual-8086 modes. The emulated
+         results are dummy values.
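As a hedged illustration of the behaviour this option describes (not part of the patch), a user-space probe that executes SMSW, one of the UMIP-protected instructions; with the option enabled the instruction faults and, in the emulated modes, returns dummy data:

    /* Hypothetical user-space probe; builds with any x86 C compiler. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long msw = 0;

            asm volatile("smsw %0" : "=r" (msw));   /* UMIP-protected instruction */
            printf("SMSW returned %#lx\n", msw);
            return 0;
    }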
 
 config X86_INTEL_MPX
        prompt "Intel MPX (Memory Protection Extensions)"
index a63fbc25ce84bd4ddd43cd1867ade39a98df83df..8199a6187251d0179276c96b5260a2226397ef06 100644 (file)
@@ -171,7 +171,6 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
 static void mem_avoid_memmap(char *str)
 {
        static int i;
-       int rc;
 
        if (i >= MAX_MEMMAP_REGIONS)
                return;
@@ -219,7 +218,7 @@ static int handle_mem_memmap(void)
                return 0;
 
        tmp_cmdline = malloc(len + 1);
-       if (!tmp_cmdline )
+       if (!tmp_cmdline)
                error("Failed to allocate space for tmp_cmdline");
 
        memcpy(tmp_cmdline, args, len);
@@ -363,7 +362,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
        cmd_line |= boot_params->hdr.cmd_line_ptr;
        /* Calculate size of cmd_line. */
        ptr = (char *)(unsigned long)cmd_line;
-       for (cmd_line_size = 0; ptr[cmd_line_size++]; )
+       for (cmd_line_size = 0; ptr[cmd_line_size++];)
                ;
        mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
        mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
index a2b30ec69497277f39ef56e0dfad604170f78c19..f81d50d7ceacdefa06d61482687937096c68421c 100644 (file)
@@ -51,15 +51,19 @@ ENTRY(native_usergs_sysret64)
 END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
-.macro TRACE_IRQS_IRETQ
+.macro TRACE_IRQS_FLAGS flags:req
 #ifdef CONFIG_TRACE_IRQFLAGS
-       bt      $9, EFLAGS(%rsp)                /* interrupts off? */
+       bt      $9, \flags              /* interrupts off? */
        jnc     1f
        TRACE_IRQS_ON
 1:
 #endif
 .endm
 
+.macro TRACE_IRQS_IRETQ
+       TRACE_IRQS_FLAGS EFLAGS(%rsp)
+.endm
+
 /*
  * When dynamic function tracer is enabled it will add a breakpoint
  * to all locations that it is about to modify, sync CPUs, update
@@ -148,8 +152,6 @@ ENTRY(entry_SYSCALL_64)
        movq    %rsp, PER_CPU_VAR(rsp_scratch)
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
-       TRACE_IRQS_OFF
-
        /* Construct struct pt_regs on stack */
        pushq   $__USER_DS                      /* pt_regs->ss */
        pushq   PER_CPU_VAR(rsp_scratch)        /* pt_regs->sp */
@@ -170,6 +172,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
        sub     $(6*8), %rsp                    /* pt_regs->bp, bx, r12-15 not saved */
        UNWIND_HINT_REGS extra=0
 
+       TRACE_IRQS_OFF
+
        /*
         * If we need to do entry work or if we guess we'll need to do
         * exit work, go straight to the slow path.
@@ -943,11 +947,13 @@ ENTRY(native_load_gs_index)
        FRAME_BEGIN
        pushfq
        DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
+       TRACE_IRQS_OFF
        SWAPGS
 .Lgs_change:
        movl    %edi, %gs
 2:     ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
        SWAPGS
+       TRACE_IRQS_FLAGS (%rsp)
        popfq
        FRAME_END
        ret
index c366c0adeb40da1ae311a2906087c4a73f730af4..1943aebadede9b94327e530b266ecdc3a0f5dcec 100644 (file)
@@ -130,10 +130,6 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
 
-# This makes sure the $(obj) subdirectory exists even though vdso32/
-# is not a kbuild sub-make subdirectory.
-override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
-
 targets += vdso32/vdso32.lds
 targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
 targets += vdso32/vclock_gettime.o
index 43445da30ceab12323772e81c95f0dfb3ba8cfa3..09c26a4f139c125e000675689ebc983acd8ab91a 100644 (file)
@@ -3734,6 +3734,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t,       "event=0x3c,in_tx=1");
 EVENT_ATTR_STR(cycles-ct,      cycles_ct,      "event=0x3c,in_tx=1,in_tx_cp=1");
 
 static struct attribute *hsw_events_attrs[] = {
+       EVENT_PTR(mem_ld_hsw),
+       EVENT_PTR(mem_st_hsw),
+       EVENT_PTR(td_slots_issued),
+       EVENT_PTR(td_slots_retired),
+       EVENT_PTR(td_fetch_bubbles),
+       EVENT_PTR(td_total_slots),
+       EVENT_PTR(td_total_slots_scale),
+       EVENT_PTR(td_recovery_bubbles),
+       EVENT_PTR(td_recovery_bubbles_scale),
+       NULL
+};
+
+static struct attribute *hsw_tsx_events_attrs[] = {
        EVENT_PTR(tx_start),
        EVENT_PTR(tx_commit),
        EVENT_PTR(tx_abort),
@@ -3746,18 +3759,16 @@ static struct attribute *hsw_events_attrs[] = {
        EVENT_PTR(el_conflict),
        EVENT_PTR(cycles_t),
        EVENT_PTR(cycles_ct),
-       EVENT_PTR(mem_ld_hsw),
-       EVENT_PTR(mem_st_hsw),
-       EVENT_PTR(td_slots_issued),
-       EVENT_PTR(td_slots_retired),
-       EVENT_PTR(td_fetch_bubbles),
-       EVENT_PTR(td_total_slots),
-       EVENT_PTR(td_total_slots_scale),
-       EVENT_PTR(td_recovery_bubbles),
-       EVENT_PTR(td_recovery_bubbles_scale),
        NULL
 };
 
+static __init struct attribute **get_hsw_events_attrs(void)
+{
+       return boot_cpu_has(X86_FEATURE_RTM) ?
+               merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
+               hsw_events_attrs;
+}
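merge_attr(), used above and defined elsewhere in the x86 events code, concatenates two NULL-terminated attribute arrays. A rough sketch of that idea (the name merge_attr_sketch and the simplified error handling are this editor's assumptions, not the kernel's implementation):

    static struct attribute **merge_attr_sketch(struct attribute **a,
                                                struct attribute **b)
    {
            struct attribute **new;
            int i, j = 0, na = 0, nb = 0;

            while (a[na])                   /* count entries; arrays are NULL-terminated */
                    na++;
            while (b[nb])
                    nb++;

            new = kmalloc_array(na + nb + 1, sizeof(*new), GFP_KERNEL);
            if (!new)
                    return NULL;

            for (i = 0; i < na; i++)
                    new[j++] = a[i];
            for (i = 0; i < nb; i++)
                    new[j++] = b[i];
            new[j] = NULL;

            return new;
    }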
+
 static ssize_t freeze_on_smi_show(struct device *cdev,
                                  struct device_attribute *attr,
                                  char *buf)
@@ -4186,7 +4197,7 @@ __init int intel_pmu_init(void)
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = hsw_events_attrs;
+               x86_pmu.cpu_events = get_hsw_events_attrs();
                x86_pmu.lbr_double_abort = true;
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
@@ -4225,7 +4236,7 @@ __init int intel_pmu_init(void)
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = hsw_events_attrs;
+               x86_pmu.cpu_events = get_hsw_events_attrs();
                x86_pmu.limit_period = bdw_limit_period;
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
@@ -4283,7 +4294,7 @@ __init int intel_pmu_init(void)
                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
                        hsw_format_attr : nhm_format_attr;
                extra_attr = merge_attr(extra_attr, skl_format_attr);
-               x86_pmu.cpu_events = hsw_events_attrs;
+               x86_pmu.cpu_events = get_hsw_events_attrs();
                intel_pmu_pebs_data_source_skl(
                        boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
                pr_cont("Skylake events, ");
index d45e06346f14d8636f1b4348a84a6e503012c686..7874c980d56921961328fa0212da9dae61458789 100644 (file)
@@ -975,10 +975,10 @@ static void uncore_pci_remove(struct pci_dev *pdev)
        int i, phys_id, pkg;
 
        phys_id = uncore_pcibus_to_physid(pdev->bus);
-       pkg = topology_phys_to_logical_pkg(phys_id);
 
        box = pci_get_drvdata(pdev);
        if (!box) {
+               pkg = topology_phys_to_logical_pkg(phys_id);
                for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
                        if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
                                uncore_extra_pci_dev[pkg].dev[i] = NULL;
@@ -994,7 +994,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
                return;
 
        pci_set_drvdata(pdev, NULL);
-       pmu->boxes[pkg] = NULL;
+       pmu->boxes[box->pkgid] = NULL;
        if (atomic_dec_return(&pmu->activeboxes) == 0)
                uncore_pmu_unregister(pmu);
        uncore_box_exit(box);
index 4364191e7c6b2904a443bf2a522ca4445b7f0a12..414dc7e7c950c6cf7279b51f4161b04e6e868aad 100644 (file)
@@ -100,7 +100,7 @@ struct intel_uncore_extra_reg {
 
 struct intel_uncore_box {
        int pci_phys_id;
-       int pkgid;
+       int pkgid;      /* Logical package ID */
        int n_active;   /* number of active events */
        int n_events;
        int cpu;        /* cpu to collect events */
index 95cb19f4e06f03376b03a5a27c59f7e6612167e1..6d8044ab10607b6c668bfee0d6366266401e7e2f 100644 (file)
@@ -1057,7 +1057,7 @@ static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_eve
 
        if (reg1->idx != EXTRA_REG_NONE) {
                int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
-               int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
+               int pkg = box->pkgid;
                struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
 
                if (filter_pdev) {
@@ -3035,11 +3035,19 @@ static struct intel_uncore_type *bdx_msr_uncores[] = {
        NULL,
 };
 
+/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
+static struct event_constraint bdx_uncore_pcu_constraints[] = {
+       EVENT_CONSTRAINT(0x80, 0xe, 0x80),
+       EVENT_CONSTRAINT_END
+};
+
 void bdx_uncore_cpu_init(void)
 {
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        uncore_msr_uncores = bdx_msr_uncores;
+
+       hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
 }
 
 static struct intel_uncore_type bdx_uncore_ha = {
index 3a091cea36c5a118d953fd25c897989270c6f0e4..0d157d2a1e2aef98b1e69c452f27d330a5fd7179 100644 (file)
@@ -309,6 +309,7 @@ static inline int mmap_is_ia32(void)
 extern unsigned long task_size_32bit(void);
 extern unsigned long task_size_64bit(int full_addr_space);
 extern unsigned long get_mmap_base(int is_legacy);
+extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
 
 #ifdef CONFIG_X86_32
 
index b80e46733909c981aa9125c055d7e1f9f22b2409..2851077b6051b257e710dd437e2cdba7b1516e56 100644 (file)
@@ -99,14 +99,6 @@ struct irq_alloc_info {
                        void            *dmar_data;
                };
 #endif
-#ifdef CONFIG_HT_IRQ
-               struct {
-                       int             ht_pos;
-                       int             ht_idx;
-                       struct pci_dev  *ht_dev;
-                       void            *ht_update;
-               };
-#endif
 #ifdef CONFIG_X86_UV
                struct {
                        int             uv_limit;
diff --git a/arch/x86/include/asm/hypertransport.h b/arch/x86/include/asm/hypertransport.h
deleted file mode 100644 (file)
index 5d55df3..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_HYPERTRANSPORT_H
-#define _ASM_X86_HYPERTRANSPORT_H
-
-/*
- * Constants for x86 Hypertransport Interrupts.
- */
-
-#define HT_IRQ_LOW_BASE                        0xf8000000
-
-#define HT_IRQ_LOW_VECTOR_SHIFT                16
-#define HT_IRQ_LOW_VECTOR_MASK         0x00ff0000
-#define HT_IRQ_LOW_VECTOR(v)                                           \
-       (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
-
-#define HT_IRQ_LOW_DEST_ID_SHIFT       8
-#define HT_IRQ_LOW_DEST_ID_MASK                0x0000ff00
-#define HT_IRQ_LOW_DEST_ID(v)                                          \
-       (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
-
-#define HT_IRQ_LOW_DM_PHYSICAL         0x0000000
-#define HT_IRQ_LOW_DM_LOGICAL          0x0000040
-
-#define HT_IRQ_LOW_RQEOI_EDGE          0x0000000
-#define HT_IRQ_LOW_RQEOI_LEVEL         0x0000020
-
-
-#define HT_IRQ_LOW_MT_FIXED            0x0000000
-#define HT_IRQ_LOW_MT_ARBITRATED       0x0000004
-#define HT_IRQ_LOW_MT_SMI              0x0000008
-#define HT_IRQ_LOW_MT_NMI              0x000000c
-#define HT_IRQ_LOW_MT_INIT             0x0000010
-#define HT_IRQ_LOW_MT_STARTUP          0x0000014
-#define HT_IRQ_LOW_MT_EXTINT           0x0000018
-#define HT_IRQ_LOW_MT_LINT1            0x000008c
-#define HT_IRQ_LOW_MT_LINT0            0x0000098
-
-#define HT_IRQ_LOW_IRQ_MASKED          0x0000001
-
-
-#define HT_IRQ_HIGH_DEST_ID_SHIFT      0
-#define HT_IRQ_HIGH_DEST_ID_MASK       0x00ffffff
-#define HT_IRQ_HIGH_DEST_ID(v)                                         \
-       ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
-
-#endif /* _ASM_X86_HYPERTRANSPORT_H */
index e1d3b4ce8a925350df55f898dbdb9cf0f416881f..2b6ccf2c49f11c9c54fbe448ea7d33d997b99ca6 100644 (file)
@@ -18,6 +18,6 @@
 void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
 int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);
 unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
-char insn_get_code_seg_params(struct pt_regs *regs);
+int insn_get_code_seg_params(struct pt_regs *regs);
 
 #endif /* _ASM_X86_INSN_EVAL_H */
index 93ae8aee178075da0110e026c546ab2db8674775..95e948627fd04878883041543c3b5f13703ceaa9 100644 (file)
@@ -111,6 +111,10 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
 
 #endif
 
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
 /**
  *     virt_to_phys    -       map virtual addresses to physical
  *     @address: address to remap
index f695cc6b8e1f4476263d4477063aec247accde0d..139feef467f7e298c6f9db57c43facc64f5468b6 100644 (file)
@@ -56,10 +56,4 @@ extern void arch_init_msi_domain(struct irq_domain *domain);
 static inline void arch_init_msi_domain(struct irq_domain *domain) { }
 #endif
 
-#ifdef CONFIG_HT_IRQ
-extern void arch_init_htirq_domain(struct irq_domain *domain);
-#else
-static inline void arch_init_htirq_domain(struct irq_domain *domain) { }
-#endif
-
 #endif
index 1bfb99770c34197b6c0627897753d282b3e5c378..977de5fb968be412862de87aeb95136bae69e620 100644 (file)
@@ -1161,7 +1161,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                        int emulation_type)
 {
-       return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+       return x86_emulate_instruction(vcpu, 0,
+                       emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
 }
 
 void kvm_enable_efer_bits(u64);
index 09f9e1e00e3bd30b5869b126f2ab11be49388f05..95e2dfd755218ccfaf6417b44c822b545a35568e 100644 (file)
@@ -1061,7 +1061,7 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
 
 
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        return pmd_flags(pmd) & _PAGE_RW;
@@ -1088,6 +1088,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_RW;
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
index 2db7cf720b04b2d067df4f3138f2914de0e5cc0f..cc16fa882e3e760a40351cf3e7476ac9f25ffe00 100644 (file)
@@ -132,6 +132,7 @@ struct cpuinfo_x86 {
        /* Index into per_cpu list: */
        u16                     cpu_index;
        u32                     microcode;
+       unsigned                initialized : 1;
 } __randomize_layout;
 
 struct cpuid_regs {
index ef9e02e614d0691ac0c5cdba8fb2d878a59e416e..f4c463df8b0886816c5b32a989396fe5e0f714f0 100644 (file)
@@ -342,13 +342,12 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e
 #ifdef CONFIG_X86_IO_APIC
 #define MP_ISA_BUS             0
 
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+                                               u8 trigger, u32 gsi);
+
 static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
                                          u32 gsi)
 {
-       int ioapic;
-       int pin;
-       struct mpc_intsrc mp_irq;
-
        /*
         * Check bus_irq boundary.
         */
@@ -357,14 +356,6 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
                return;
        }
 
-       /*
-        * Convert 'gsi' to 'ioapic.pin'.
-        */
-       ioapic = mp_find_ioapic(gsi);
-       if (ioapic < 0)
-               return;
-       pin = mp_find_ioapic_pin(ioapic, gsi);
-
        /*
         * TBD: This check is for faulty timer entries, where the override
         *      erroneously sets the trigger to level, resulting in a HUGE
@@ -373,16 +364,8 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
        if ((bus_irq == 0) && (trigger == 3))
                trigger = 1;
 
-       mp_irq.type = MP_INTSRC;
-       mp_irq.irqtype = mp_INT;
-       mp_irq.irqflag = (trigger << 2) | polarity;
-       mp_irq.srcbus = MP_ISA_BUS;
-       mp_irq.srcbusirq = bus_irq;     /* IRQ */
-       mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */
-       mp_irq.dstirq = pin;    /* INTIN# */
-
-       mp_save_irq(&mp_irq);
-
+       if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0)
+               return;
        /*
         * Reset default identity mapping if gsi is also a legacy IRQ,
         * otherwise there will be more than one entry with the same GSI
@@ -429,6 +412,34 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
        return 0;
 }
 
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+                                               u8 trigger, u32 gsi)
+{
+       struct mpc_intsrc mp_irq;
+       int ioapic, pin;
+
+       /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */
+       ioapic = mp_find_ioapic(gsi);
+       if (ioapic < 0) {
+               pr_warn("Failed to find ioapic for gsi : %u\n", gsi);
+               return ioapic;
+       }
+
+       pin = mp_find_ioapic_pin(ioapic, gsi);
+
+       mp_irq.type = MP_INTSRC;
+       mp_irq.irqtype = mp_INT;
+       mp_irq.irqflag = (trigger << 2) | polarity;
+       mp_irq.srcbus = MP_ISA_BUS;
+       mp_irq.srcbusirq = bus_irq;
+       mp_irq.dstapic = mpc_ioapic_id(ioapic);
+       mp_irq.dstirq = pin;
+
+       mp_save_irq(&mp_irq);
+
+       return 0;
+}
+
 static int __init
 acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 {
@@ -473,7 +484,11 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
        if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
-       mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+       if (bus_irq < NR_IRQS_LEGACY)
+               mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+       else
+               mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi);
+
        acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 
        /*
index a9e08924927ef6da6f4620942b3ceba654d1578d..a6fcaf16cdbf9b26a36378bf764fb955bf6ab861 100644 (file)
@@ -12,7 +12,6 @@ obj-y                         += hw_nmi.o
 
 obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
 obj-$(CONFIG_PCI_MSI)          += msi.o
-obj-$(CONFIG_HT_IRQ)           += htirq.o
 obj-$(CONFIG_SMP)              += ipi.o
 
 ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
deleted file mode 100644 (file)
index b07075d..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Support Hypertransport IRQ
- *
- * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
- *     Moved from arch/x86/kernel/apic/io_apic.c.
- * Jiang Liu <jiang.liu@linux.intel.com>
- *     Add support of hierarchical irqdomain
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/htirq.h>
-#include <asm/irqdomain.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/hypertransport.h>
-
-static struct irq_domain *htirq_domain;
-
-/*
- * Hypertransport interrupt support
- */
-static int
-ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
-       struct irq_data *parent = data->parent_data;
-       int ret;
-
-       ret = parent->chip->irq_set_affinity(parent, mask, force);
-       if (ret >= 0) {
-               struct ht_irq_msg msg;
-               struct irq_cfg *cfg = irqd_cfg(data);
-
-               fetch_ht_irq_msg(data->irq, &msg);
-               msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK |
-                                   HT_IRQ_LOW_DEST_ID_MASK);
-               msg.address_lo |= HT_IRQ_LOW_VECTOR(cfg->vector) |
-                                 HT_IRQ_LOW_DEST_ID(cfg->dest_apicid);
-               msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-               msg.address_hi |= HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
-               write_ht_irq_msg(data->irq, &msg);
-       }
-
-       return ret;
-}
-
-static struct irq_chip ht_irq_chip = {
-       .name                   = "PCI-HT",
-       .irq_mask               = mask_ht_irq,
-       .irq_unmask             = unmask_ht_irq,
-       .irq_ack                = irq_chip_ack_parent,
-       .irq_set_affinity       = ht_set_affinity,
-       .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int htirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
-                             unsigned int nr_irqs, void *arg)
-{
-       struct ht_irq_cfg *ht_cfg;
-       struct irq_alloc_info *info = arg;
-       struct pci_dev *dev;
-       irq_hw_number_t hwirq;
-       int ret;
-
-       if (nr_irqs > 1 || !info)
-               return -EINVAL;
-
-       dev = info->ht_dev;
-       hwirq = (info->ht_idx & 0xFF) |
-               PCI_DEVID(dev->bus->number, dev->devfn) << 8 |
-               (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 24;
-       if (irq_find_mapping(domain, hwirq) > 0)
-               return -EEXIST;
-
-       ht_cfg = kmalloc(sizeof(*ht_cfg), GFP_KERNEL);
-       if (!ht_cfg)
-               return -ENOMEM;
-
-       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
-       if (ret < 0) {
-               kfree(ht_cfg);
-               return ret;
-       }
-
-       /* Initialize msg to a value that will never match the first write. */
-       ht_cfg->msg.address_lo = 0xffffffff;
-       ht_cfg->msg.address_hi = 0xffffffff;
-       ht_cfg->dev = info->ht_dev;
-       ht_cfg->update = info->ht_update;
-       ht_cfg->pos = info->ht_pos;
-       ht_cfg->idx = 0x10 + (info->ht_idx * 2);
-       irq_domain_set_info(domain, virq, hwirq, &ht_irq_chip, ht_cfg,
-                           handle_edge_irq, ht_cfg, "edge");
-
-       return 0;
-}
-
-static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
-                             unsigned int nr_irqs)
-{
-       struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
-
-       BUG_ON(nr_irqs != 1);
-       kfree(irq_data->chip_data);
-       irq_domain_free_irqs_top(domain, virq, nr_irqs);
-}
-
-static int htirq_domain_activate(struct irq_domain *domain,
-                                struct irq_data *irq_data, bool early)
-{
-       struct ht_irq_msg msg;
-       struct irq_cfg *cfg = irqd_cfg(irq_data);
-
-       msg.address_hi = HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
-       msg.address_lo =
-               HT_IRQ_LOW_BASE |
-               HT_IRQ_LOW_DEST_ID(cfg->dest_apicid) |
-               HT_IRQ_LOW_VECTOR(cfg->vector) |
-               ((apic->irq_dest_mode == 0) ?
-                       HT_IRQ_LOW_DM_PHYSICAL :
-                       HT_IRQ_LOW_DM_LOGICAL) |
-               HT_IRQ_LOW_RQEOI_EDGE |
-               ((apic->irq_delivery_mode != dest_LowestPrio) ?
-                       HT_IRQ_LOW_MT_FIXED :
-                       HT_IRQ_LOW_MT_ARBITRATED) |
-               HT_IRQ_LOW_IRQ_MASKED;
-       write_ht_irq_msg(irq_data->irq, &msg);
-       return 0;
-}
-
-static void htirq_domain_deactivate(struct irq_domain *domain,
-                                   struct irq_data *irq_data)
-{
-       struct ht_irq_msg msg;
-
-       memset(&msg, 0, sizeof(msg));
-       write_ht_irq_msg(irq_data->irq, &msg);
-}
-
-static const struct irq_domain_ops htirq_domain_ops = {
-       .alloc          = htirq_domain_alloc,
-       .free           = htirq_domain_free,
-       .activate       = htirq_domain_activate,
-       .deactivate     = htirq_domain_deactivate,
-};
-
-void __init arch_init_htirq_domain(struct irq_domain *parent)
-{
-       struct fwnode_handle *fn;
-
-       if (disable_apic)
-               return;
-
-       fn = irq_domain_alloc_named_fwnode("PCI-HT");
-       if (!fn)
-               goto warn;
-
-       htirq_domain = irq_domain_create_tree(fn, &htirq_domain_ops, NULL);
-       irq_domain_free_fwnode(fn);
-       if (!htirq_domain)
-               goto warn;
-
-       htirq_domain->parent = parent;
-       return;
-
-warn:
-       pr_warn("Failed to initialize irqdomain for HTIRQ.\n");
-}
-
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
-                     ht_irq_update_t *update)
-{
-       struct irq_alloc_info info;
-
-       if (!htirq_domain)
-               return -ENOSYS;
-
-       init_irq_alloc_info(&info, NULL);
-       info.ht_idx = idx;
-       info.ht_pos = pos;
-       info.ht_dev = dev;
-       info.ht_update = update;
-
-       return irq_domain_alloc_irqs(htirq_domain, 1, dev_to_node(&dev->dev),
-                                    &info);
-}
-
-void arch_teardown_ht_irq(unsigned int irq)
-{
-       irq_domain_free_irqs(irq, 1);
-}
index 05c85e693a5d4c9d9e17da19337f2318f29aa621..6a823a25eaff03787660bd1f92e587362259e54d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
+ * Local APIC related interfaces to support IOAPIC, MSI, etc.
  *
  * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
  *     Moved from arch/x86/kernel/apic/io_apic.c.
@@ -601,7 +601,7 @@ int __init arch_probe_nr_irqs(void)
                nr_irqs = NR_VECTORS * nr_cpu_ids;
 
        nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+#if defined(CONFIG_PCI_MSI)
        /*
         * for MSI and HT dyn irq
         */
@@ -663,7 +663,6 @@ int __init arch_early_irq_init(void)
        irq_set_default_host(x86_vector_domain);
 
        arch_init_msi_domain(x86_vector_domain);
-       arch_init_htirq_domain(x86_vector_domain);
 
        BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
 
index 13ae9e5eec2f5c8e40f89f2ecd077fc852d8d32c..fa998ca8aa5aa5b4899dbe8a57c5b543f927009e 100644 (file)
@@ -341,6 +341,8 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 
        cr4_set_bits(X86_CR4_UMIP);
 
+       pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");
+
        return;
 
 out:
index 410c5dadcee31930f8c30ddaeb1b4c940835a081..3a4b12809ab5f810f5a8657bfcc3f7521450e645 100644 (file)
@@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 }
 
 static unsigned long mpf_base;
+static bool mpf_found;
 
 static unsigned long __init get_mpc_size(unsigned long physptr)
 {
@@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early)
        if (!smp_found_config)
                return;
 
-       if (!mpf_base)
+       if (!mpf_found)
                return;
 
        if (acpi_lapic && early)
@@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
                        smp_found_config = 1;
 #endif
                        mpf_base = base;
+                       mpf_found = true;
 
                        pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
                                base, base + sizeof(*mpf) - 1, mpf);
@@ -858,7 +860,7 @@ static int __init update_mp_table(void)
        if (!enable_update_mptable)
                return 0;
 
-       if (!mpf_base)
+       if (!mpf_found)
                return 0;
 
        mpf = early_memremap(mpf_base, sizeof(*mpf));
index 5f59e6bee123ffb324ec12a02d1921a7029ddc96..3d01df7d7cf60cdbe1342fe84006405712394663 100644 (file)
@@ -101,9 +101,6 @@ DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /* Logical package management. We might want to allocate that dynamically */
-static int *physical_to_logical_pkg __read_mostly;
-static unsigned long *physical_package_map __read_mostly;;
-static unsigned int max_physical_pkg_id __read_mostly;
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
 static unsigned int logical_packages __read_mostly;
@@ -280,6 +277,25 @@ static void notrace start_secondary(void *unused)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
+/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
+ *
+ * Returns logical package id or -1 if not found
+ */
+int topology_phys_to_logical_pkg(unsigned int phys_pkg)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+               if (c->initialized && c->phys_proc_id == phys_pkg)
+                       return c->logical_proc_id;
+       }
+       return -1;
+}
+EXPORT_SYMBOL(topology_phys_to_logical_pkg);
+
 /**
  * topology_update_package_map - Update the physical to logical package map
  * @pkg:       The physical package id as retrieved via CPUID
@@ -287,102 +303,23 @@ static void notrace start_secondary(void *unused)
  */
 int topology_update_package_map(unsigned int pkg, unsigned int cpu)
 {
-       unsigned int new;
+       int new;
 
-       /* Called from early boot ? */
-       if (!physical_package_map)
-               return 0;
-
-       if (pkg >= max_physical_pkg_id)
-               return -EINVAL;
-
-       /* Set the logical package id */
-       if (test_and_set_bit(pkg, physical_package_map))
+       /* Already available somewhere? */
+       new = topology_phys_to_logical_pkg(pkg);
+       if (new >= 0)
                goto found;
 
-       if (logical_packages >= __max_logical_packages) {
-               pr_warn("Package %u of CPU %u exceeds BIOS package data %u.\n",
-                       logical_packages, cpu, __max_logical_packages);
-               return -ENOSPC;
-       }
-
        new = logical_packages++;
        if (new != pkg) {
                pr_info("CPU %u Converting physical %u to logical package %u\n",
                        cpu, pkg, new);
        }
-       physical_to_logical_pkg[pkg] = new;
-
 found:
-       cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
+       cpu_data(cpu).logical_proc_id = new;
        return 0;
 }
 
-/**
- * topology_phys_to_logical_pkg - Map a physical package id to a logical
- *
- * Returns logical package id or -1 if not found
- */
-int topology_phys_to_logical_pkg(unsigned int phys_pkg)
-{
-       if (phys_pkg >= max_physical_pkg_id)
-               return -1;
-       return physical_to_logical_pkg[phys_pkg];
-}
-EXPORT_SYMBOL(topology_phys_to_logical_pkg);
-
-static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu)
-{
-       unsigned int ncpus;
-       size_t size;
-
-       /*
-        * Today neither Intel nor AMD support heterogenous systems. That
-        * might change in the future....
-        *
-        * While ideally we'd want '* smp_num_siblings' in the below @ncpus
-        * computation, this won't actually work since some Intel BIOSes
-        * report inconsistent HT data when they disable HT.
-        *
-        * In particular, they reduce the APIC-IDs to only include the cores,
-        * but leave the CPUID topology to say there are (2) siblings.
-        * This means we don't know how many threads there will be until
-        * after the APIC enumeration.
-        *
-        * By not including this we'll sometimes over-estimate the number of
-        * logical packages by the amount of !present siblings, but this is
-        * still better than MAX_LOCAL_APIC.
-        *
-        * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited
-        * on the command line leading to a similar issue as the HT disable
-        * problem because the hyperthreads are usually enumerated after the
-        * primary cores.
-        */
-       ncpus = boot_cpu_data.x86_max_cores;
-       if (!ncpus) {
-               pr_warn("x86_max_cores == zero !?!?");
-               ncpus = 1;
-       }
-
-       __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
-       logical_packages = 0;
-
-       /*
-        * Possibly larger than what we need as the number of apic ids per
-        * package can be smaller than the actual used apic ids.
-        */
-       max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
-       size = max_physical_pkg_id * sizeof(unsigned int);
-       physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
-       memset(physical_to_logical_pkg, 0xff, size);
-       size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
-       physical_package_map = kzalloc(size, GFP_KERNEL);
-
-       pr_info("Max logical packages: %u\n", __max_logical_packages);
-
-       topology_update_package_map(c->phys_proc_id, cpu);
-}
-
 void __init smp_store_boot_cpu_info(void)
 {
        int id = 0; /* CPU 0 */
@@ -390,7 +327,8 @@ void __init smp_store_boot_cpu_info(void)
 
        *c = boot_cpu_data;
        c->cpu_index = id;
-       smp_init_package_map(c, id);
+       topology_update_package_map(c->phys_proc_id, id);
+       c->initialized = true;
 }
 
 /*
@@ -401,13 +339,16 @@ void smp_store_cpu_info(int id)
 {
        struct cpuinfo_x86 *c = &cpu_data(id);
 
-       *c = boot_cpu_data;
+       /* Copy boot_cpu_data only on the first bringup */
+       if (!c->initialized)
+               *c = boot_cpu_data;
        c->cpu_index = id;
        /*
         * During boot time, CPU0 has this setup already. Save the info when
         * bringing up AP or offlined CPU0.
         */
        identify_secondary_cpu(c);
+       c->initialized = true;
 }
 
 static bool
@@ -1356,7 +1297,16 @@ void __init native_smp_prepare_boot_cpu(void)
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
 {
+       int ncpus;
+
        pr_debug("Boot done\n");
+       /*
+        * Today neither Intel nor AMD supports heterogeneous systems, so
+        * extrapolate the boot cpu's data to all packages.
+        */
+       ncpus = cpu_data(0).booted_cores * smp_num_siblings;
+       __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+       pr_info("Max logical packages: %u\n", __max_logical_packages);
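As a hedged worked example (figures hypothetical): with nr_cpu_ids = 64, booted_cores = 8 and smp_num_siblings = 2, ncpus is 16 and __max_logical_packages becomes DIV_ROUND_UP(64, 16) = 4.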
 
        if (x86_has_numa_in_package)
                set_sched_topology(x86_numa_in_package_topology);
index a63fe77b32179662353ba51f31d8097980d6fa74..676774b9bb8d1300837c9771782ae6f28a4d8bca 100644 (file)
@@ -188,6 +188,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+       /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED)
                return addr;
 
@@ -197,12 +198,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
        /* requesting a specific address */
        if (addr) {
-               addr = PAGE_ALIGN(addr);
+               addr &= PAGE_MASK;
+               if (!mmap_address_hint_valid(addr, len))
+                       goto get_unmapped_area;
+
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+               if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
+get_unmapped_area:
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
index 6ba82be68cffa25ed9beef59162de6d9318078bf..f44ce0fb35832aa0cfd7619fff7ad27b1742540e 100644 (file)
 
 #define        UMIP_INST_SGDT  0       /* 0F 01 /0 */
 #define        UMIP_INST_SIDT  1       /* 0F 01 /1 */
-#define        UMIP_INST_SMSW  3       /* 0F 01 /4 */
+#define        UMIP_INST_SMSW  2       /* 0F 01 /4 */
+#define        UMIP_INST_SLDT  3       /* 0F 00 /0 */
+#define        UMIP_INST_STR   4       /* 0F 00 /1 */
+
+const char * const umip_insns[5] = {
+       [UMIP_INST_SGDT] = "SGDT",
+       [UMIP_INST_SIDT] = "SIDT",
+       [UMIP_INST_SMSW] = "SMSW",
+       [UMIP_INST_SLDT] = "SLDT",
+       [UMIP_INST_STR] = "STR",
+};
+
+#define umip_pr_err(regs, fmt, ...) \
+       umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__)
+#define umip_pr_warning(regs, fmt, ...) \
+       umip_printk(regs, KERN_WARNING, fmt,  ##__VA_ARGS__)
+
+/**
+ * umip_printk() - Print a rate-limited message
+ * @regs:      Register set with the context in which the warning is printed
+ * @log_level: Kernel log level to print the message
+ * @fmt:       The text string to print
+ *
+ * Print the text contained in @fmt. The print rate is limited to bursts of 5
+ * messages every two minutes. The purpose of this customized version of
+ * printk() is to print messages when user space processes use any of the
+ * UMIP-protected instructions. Thus, the printed text is prepended with the
+ * task name and process ID number of the current task as well as the
+ * instruction and stack pointers in @regs as seen when entering kernel mode.
+ *
+ * Returns:
+ *
+ * None.
+ */
+static __printf(3, 4)
+void umip_printk(const struct pt_regs *regs, const char *log_level,
+                const char *fmt, ...)
+{
+       /* Bursts of 5 messages every two minutes */
+       static DEFINE_RATELIMIT_STATE(ratelimit, 2 * 60 * HZ, 5);
+       struct task_struct *tsk = current;
+       struct va_format vaf;
+       va_list args;
+
+       if (!__ratelimit(&ratelimit))
+               return;
+
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       printk("%s" pr_fmt("%s[%d] ip:%lx sp:%lx: %pV"), log_level, tsk->comm,
+              task_pid_nr(tsk), regs->ip, regs->sp, &vaf);
+       va_end(args);
+}
 
 /**
  * identify_insn() - Identify a UMIP-protected instruction
@@ -118,10 +171,16 @@ static int identify_insn(struct insn *insn)
                default:
                        return -EINVAL;
                }
+       } else if (insn->opcode.bytes[1] == 0x0) {
+               if (X86_MODRM_REG(insn->modrm.value) == 0)
+                       return UMIP_INST_SLDT;
+               else if (X86_MODRM_REG(insn->modrm.value) == 1)
+                       return UMIP_INST_STR;
+               else
+                       return -EINVAL;
+       } else {
+               return -EINVAL;
        }
-
-       /* SLDT AND STR are not emulated */
-       return -EINVAL;
 }
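A hedged worked decode of the new two-byte-opcode branch (instruction bytes chosen purely for illustration): for the sequence 0f 00 c8, opcode.bytes[1] is 0x00 and the ModRM reg field of 0xc8 (bits 5:3) is 1, so identify_insn() returns UMIP_INST_STR; with ModRM byte 0xc0 the reg field is 0 and the result is UMIP_INST_SLDT.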
 
 /**
@@ -228,10 +287,8 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs)
        if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV)))
                return;
 
-       pr_err_ratelimited("%s[%d] umip emulation segfault ip:%lx sp:%lx error:%x in %lx\n",
-                          tsk->comm, task_pid_nr(tsk), regs->ip,
-                          regs->sp, X86_PF_USER | X86_PF_WRITE,
-                          regs->ip);
+       umip_pr_err(regs, "segfault in emulation. error%x\n",
+                   X86_PF_USER | X86_PF_WRITE);
 }
 
 /**
@@ -262,15 +319,11 @@ bool fixup_umip_exception(struct pt_regs *regs)
        unsigned char buf[MAX_INSN_SIZE];
        void __user *uaddr;
        struct insn insn;
-       char seg_defs;
+       int seg_defs;
 
        if (!regs)
                return false;
 
-       /* Do not emulate 64-bit processes. */
-       if (user_64bit_mode(regs))
-               return false;
-
        /*
         * If not in user-space long mode, a custom code segment could be in
         * use. This is true in protected mode (if the process defined a local
@@ -322,6 +375,15 @@ bool fixup_umip_exception(struct pt_regs *regs)
        if (umip_inst < 0)
                return false;
 
+       umip_pr_warning(regs, "%s instruction cannot be used by applications.\n",
+                       umip_insns[umip_inst]);
+
+       /* Do not emulate SLDT, STR or user long mode processes. */
+       if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT || user_64bit_mode(regs))
+               return false;
+
+       umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n");
+
        if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size))
                return false;
 
index cdc70a3a65838b10d558c3d0b14bcdf4d9e996d2..c2cea6651279f706f488cf51a523301e6de4ae77 100644 (file)
@@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
-       [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+       [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
        [CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
index 8079d141792af91994421d15c19c26d3bd386c59..e7d04d0c8008d1a1d69966105ad855351a1f474f 100644 (file)
@@ -4014,6 +4014,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
                                   fxstate_size(ctxt));
 }
 
+/*
+ * FXRSTOR might restore XMM registers not provided by the guest. Fill
+ * in the host registers (via FXSAVE) instead, so they won't be modified.
+ * (preemption has to stay disabled until FXRSTOR).
+ *
+ * Use noinline so the 512-byte struct fxregs_state temporary does not
+ * inflate the caller's stack frame for the other functions it calls.
+ */
+static noinline int fxregs_fixup(struct fxregs_state *fx_state,
+                                const size_t used_size)
+{
+       struct fxregs_state fx_tmp;
+       int rc;
+
+       rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
+       memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
+              __fxstate_size(16) - used_size);
+
+       return rc;
+}
+
 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 {
        struct fxregs_state fx_state;
@@ -4024,19 +4044,19 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
+       size = fxstate_size(ctxt);
+       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+
        ctxt->ops->get_fpu(ctxt);
 
-       size = fxstate_size(ctxt);
        if (size < __fxstate_size(16)) {
-               rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+               rc = fxregs_fixup(&fx_state, size);
                if (rc != X86EMUL_CONTINUE)
                        goto out;
        }
 
-       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
-       if (rc != X86EMUL_CONTINUE)
-               goto out;
-
        if (fx_state.mxcsr >> 16) {
                rc = emulate_gp(ctxt, 0);
                goto out;
@@ -5000,6 +5020,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        bool op_prefix = false;
        bool has_seg_override = false;
        struct opcode opcode;
+       u16 dummy;
+       struct desc_struct desc;
 
        ctxt->memop.type = OP_NONE;
        ctxt->memopp = NULL;
@@ -5018,6 +5040,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        switch (mode) {
        case X86EMUL_MODE_REAL:
        case X86EMUL_MODE_VM86:
+               def_op_bytes = def_ad_bytes = 2;
+               ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
+               if (desc.d)
+                       def_op_bytes = def_ad_bytes = 4;
+               break;
        case X86EMUL_MODE_PROT16:
                def_op_bytes = def_ad_bytes = 2;
                break;
index bdff437acbcb7ebc3307523edd848fb7db009c39..4e822ad363f37f613d14ab94f35609bcf3539bf7 100644 (file)
@@ -209,12 +209,12 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
 
        old_irr = ioapic->irr;
        ioapic->irr |= mask;
-       if (edge)
+       if (edge) {
                ioapic->irr_delivered &= ~mask;
-       if ((edge && old_irr == ioapic->irr) ||
-           (!edge && entry.fields.remote_irr)) {
-               ret = 0;
-               goto out;
+               if (old_irr == ioapic->irr) {
+                       ret = 0;
+                       goto out;
+               }
        }
 
        ret = ioapic_service(ioapic, irq, line_status);
@@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
                    index == RTC_GSI) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
                                     e->fields.dest_id, e->fields.dest_mode) ||
-                           (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
-                            kvm_apic_pending_eoi(vcpu, e->fields.vector)))
+                           kvm_apic_pending_eoi(vcpu, e->fields.vector))
                                __set_bit(e->fields.vector,
                                          ioapic_handled_vectors);
                }
@@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
        unsigned index;
        bool mask_before, mask_after;
+       int old_remote_irr, old_delivery_status;
        union kvm_ioapic_redirect_entry *e;
 
        switch (ioapic->ioregsel) {
@@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                        return;
                e = &ioapic->redirtbl[index];
                mask_before = e->fields.mask;
+               /* Preserve read-only fields */
+               old_remote_irr = e->fields.remote_irr;
+               old_delivery_status = e->fields.delivery_status;
                if (ioapic->ioregsel & 1) {
                        e->bits &= 0xffffffff;
                        e->bits |= (u64) val << 32;
                } else {
                        e->bits &= ~0xffffffffULL;
                        e->bits |= (u32) val;
-                       e->fields.remote_irr = 0;
                }
+               e->fields.remote_irr = old_remote_irr;
+               e->fields.delivery_status = old_delivery_status;
+
+               /*
+                * Some OSes (Linux, Xen) assume that Remote IRR bit will
+                * be cleared by IOAPIC hardware when the entry is configured
+                * as edge-triggered. This behavior is used to simulate an
+                * explicit EOI on IOAPICs that don't have the EOI register.
+                */
+               if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
+                       e->fields.remote_irr = 0;
+
                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
@@ -324,7 +338,9 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
        struct kvm_lapic_irq irqe;
        int ret;
 
-       if (entry->fields.mask)
+       if (entry->fields.mask ||
+           (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
+           entry->fields.remote_irr))
                return -1;
 
        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
index 943acbf00c69d8f423289116bc363159144f883a..e2c1fb8d35cea28af684d4ba76d70a5e2e12e9a5 100644 (file)
@@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
        recalculate_apic_map(apic->vcpu->kvm);
 }
 
+static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
+{
+       return ((id >> 4) << 16) | (1 << (id & 0xf));
+}
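A worked example of the LDR derivation above (APIC id chosen for illustration): for id = 0x23, (0x23 >> 4) << 16 = 0x20000 and 1 << (0x23 & 0xf) = 1 << 3 = 0x8, so the logical destination register value is 0x20008, i.e. cluster 2, bit 3.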
+
 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 {
-       u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+       u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 
        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
 
@@ -2245,6 +2250,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 {
        if (apic_x2apic_mode(vcpu->arch.apic)) {
                u32 *id = (u32 *)(s->regs + APIC_ID);
+               u32 *ldr = (u32 *)(s->regs + APIC_LDR);
 
                if (vcpu->kvm->arch.x2apic_format) {
                        if (*id != vcpu->vcpu_id)
@@ -2255,6 +2261,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
                        else
                                *id <<= 24;
                }
+
+               /* In x2APIC mode, the LDR is fixed and based on the id */
+               if (set)
+                       *ldr = kvm_apic_calc_x2apic_ldr(*id);
        }
 
        return 0;
index b71daed3cca29dc8b1c4ccc4ba34e927dccbd5ea..eb714f1cdf7eee4ca9036005c3ab72ef9228ae9b 100644 (file)
@@ -361,6 +361,7 @@ static void recalc_intercepts(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *c, *h;
        struct nested_state *g;
+       u32 h_intercept_exceptions;
 
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 
@@ -371,9 +372,14 @@ static void recalc_intercepts(struct vcpu_svm *svm)
        h = &svm->nested.hsave->control;
        g = &svm->nested;
 
+       /* No need to intercept #UD if L1 doesn't intercept it */
+       h_intercept_exceptions =
+               h->intercept_exceptions & ~(1U << UD_VECTOR);
+
        c->intercept_cr = h->intercept_cr | g->intercept_cr;
        c->intercept_dr = h->intercept_dr | g->intercept_dr;
-       c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
+       c->intercept_exceptions =
+               h_intercept_exceptions | g->intercept_exceptions;
        c->intercept = h->intercept | g->intercept;
 }
 
@@ -2196,7 +2202,10 @@ static int ud_interception(struct vcpu_svm *svm)
 {
        int er;
 
+       WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
        er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+       if (er == EMULATE_USER_EXIT)
+               return 0;
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
@@ -3671,6 +3680,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        u32 ecx = msr->index;
        u64 data = msr->data;
        switch (ecx) {
+       case MSR_IA32_CR_PAT:
+               if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+                       return 1;
+               vcpu->arch.pat = data;
+               svm->vmcb->save.g_pat = data;
+               mark_dirty(svm->vmcb, VMCB_NPT);
+               break;
        case MSR_IA32_TSC:
                kvm_write_tsc(vcpu, msr);
                break;
index 7c3522a989d0b37713a802be82ee1f265fe64c9a..4704aaf6d19e2ea36e3d12ddc0c345bc8ed3632f 100644 (file)
@@ -70,6 +70,9 @@ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
 static bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
 
+static bool __read_mostly enable_vnmi = 1;
+module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
+
 static bool __read_mostly flexpriority_enabled = 1;
 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
 
@@ -202,6 +205,10 @@ struct loaded_vmcs {
        bool nmi_known_unmasked;
        unsigned long vmcs_host_cr3;    /* May not match real cr3 */
        unsigned long vmcs_host_cr4;    /* May not match real cr4 */
+       /* Support for vnmi-less CPUs */
+       int soft_vnmi_blocked;
+       ktime_t entry_time;
+       s64 vnmi_blocked_time;
        struct list_head loaded_vmcss_on_cpu_link;
 };
 
@@ -1291,6 +1298,11 @@ static inline bool cpu_has_vmx_invpcid(void)
                SECONDARY_EXEC_ENABLE_INVPCID;
 }
 
+static inline bool cpu_has_virtual_nmis(void)
+{
+       return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
+}
+
 static inline bool cpu_has_vmx_wbinvd_exit(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -1348,11 +1360,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
                (vmcs12->secondary_vm_exec_control & bit);
 }
 
-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
-{
-       return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
-}
-
 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
 {
        return vmcs12->pin_based_vm_exec_control &
@@ -1880,7 +1887,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
        u32 eb;
 
-       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+       eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) |
             (1u << DB_VECTOR) | (1u << AC_VECTOR);
        if ((vcpu->guest_debug &
             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1898,6 +1905,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
         */
        if (is_guest_mode(vcpu))
                eb |= get_vmcs12(vcpu)->exception_bitmap;
+       else
+               eb |= 1u << UD_VECTOR;
 
        vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -3712,9 +3721,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                                &_vmexit_control) < 0)
                return -EIO;
 
-       min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
-               PIN_BASED_VIRTUAL_NMIS;
-       opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
+       min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+       opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
+                PIN_BASED_VMX_PREEMPTION_TIMER;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
                                &_pin_based_exec_control) < 0)
                return -EIO;
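
Virtual NMIs move from the required (min) set to the optional (opt) set of pin-based controls, so VMCS configuration no longer fails on hardware without the feature; instead enable_vnmi is cleared later in hardware_setup() and the software NMI-blocking fallback below takes over. Roughly, the adjustment behaves like the simplified sketch here (the real adjust_vmx_controls() reads the capability MSR itself; the MSR layout noted in the comment is an assumption taken from the VMX capability MSR convention):

#include <stdint.h>

/* cap_msr layout (assumed): low 32 bits are the allowed 0-settings, i.e.
 * bits that must be 1; high 32 bits are the allowed 1-settings, i.e. bits
 * that may be 1. */
static int adjust_controls(uint32_t min, uint32_t opt, uint64_t cap_msr,
                           uint32_t *result)
{
        uint32_t ctl = min | opt;

        ctl &= (uint32_t)(cap_msr >> 32);   /* drop what the CPU cannot set */
        ctl |= (uint32_t)cap_msr;           /* force what the CPU requires  */

        if ((ctl & min) != min)             /* a required bit is unavailable */
                return -1;

        *result = ctl;
        return 0;
}
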
@@ -5232,6 +5241,10 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
 
        if (!kvm_vcpu_apicv_active(&vmx->vcpu))
                pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+
+       if (!enable_vnmi)
+               pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
+
        /* Enable the preemption timer dynamically */
        pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
        return pin_based_exec_ctrl;
@@ -5589,7 +5602,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
        }
 
-       vmcs_writel(GUEST_RFLAGS, 0x02);
+       kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
        kvm_rip_write(vcpu, 0xfff0);
 
        vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -5666,7 +5679,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
-       if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+       if (!enable_vnmi ||
+           vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
                enable_irq_window(vcpu);
                return;
        }
@@ -5706,6 +5720,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       if (!enable_vnmi) {
+               /*
+                * Tracking the NMI-blocked state in software is built upon
+                * finding the next open IRQ window. This, in turn, depends on
+                * well-behaving guests: They have to keep IRQs disabled at
+                * least as long as the NMI handler runs. Otherwise we may
+                * cause NMI nesting, maybe breaking the guest. But as this is
+                * highly unlikely, we can live with the residual risk.
+                */
+               vmx->loaded_vmcs->soft_vnmi_blocked = 1;
+               vmx->loaded_vmcs->vnmi_blocked_time = 0;
+       }
+
        ++vcpu->stat.nmi_injections;
        vmx->loaded_vmcs->nmi_known_unmasked = false;
 
@@ -5724,6 +5751,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        bool masked;
 
+       if (!enable_vnmi)
+               return vmx->loaded_vmcs->soft_vnmi_blocked;
        if (vmx->loaded_vmcs->nmi_known_unmasked)
                return false;
        masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
@@ -5735,13 +5764,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       vmx->loaded_vmcs->nmi_known_unmasked = !masked;
-       if (masked)
-               vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                             GUEST_INTR_STATE_NMI);
-       else
-               vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-                               GUEST_INTR_STATE_NMI);
+       if (!enable_vnmi) {
+               if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
+                       vmx->loaded_vmcs->soft_vnmi_blocked = masked;
+                       vmx->loaded_vmcs->vnmi_blocked_time = 0;
+               }
+       } else {
+               vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+               if (masked)
+                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                     GUEST_INTR_STATE_NMI);
+               else
+                       vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                       GUEST_INTR_STATE_NMI);
+       }
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -5749,6 +5785,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
        if (to_vmx(vcpu)->nested.nested_run_pending)
                return 0;
 
+       if (!enable_vnmi &&
+           to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+               return 0;
+
        return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
                  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
                   | GUEST_INTR_STATE_NMI));
@@ -5877,11 +5917,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                return 1;  /* already handled by vmx_vcpu_run() */
 
        if (is_invalid_opcode(intr_info)) {
-               if (is_guest_mode(vcpu)) {
-                       kvm_queue_exception(vcpu, UD_VECTOR);
-                       return 1;
-               }
+               WARN_ON_ONCE(is_guest_mode(vcpu));
                er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+               if (er == EMULATE_USER_EXIT)
+                       return 0;
                if (er != EMULATE_DONE)
                        kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
@@ -6476,6 +6515,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
         * AAK134, BY25.
         */
        if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                       enable_vnmi &&
                        (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
 
@@ -6535,6 +6575,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 
 static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
+       WARN_ON_ONCE(!enable_vnmi);
        vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
                        CPU_BASED_VIRTUAL_NMI_PENDING);
        ++vcpu->stat.nmi_window_exits;
@@ -6562,7 +6603,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                        return 1;
 
-               err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
+               err = emulate_instruction(vcpu, 0);
 
                if (err == EMULATE_USER_EXIT) {
                        ++vcpu->stat.mmio_exits;
@@ -6758,6 +6799,9 @@ static __init int hardware_setup(void)
        if (!cpu_has_vmx_flexpriority())
                flexpriority_enabled = 0;
 
+       if (!cpu_has_virtual_nmis())
+               enable_vnmi = 0;
+
        /*
         * set_apic_access_page_addr() is used to reload apic access
         * page upon invalidation.  No need to do anything if not
@@ -6962,7 +7006,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
        }
 
        /* Create a new VMCS */
-       item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
+       item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
        if (!item)
                return NULL;
        item->vmcs02.vmcs = alloc_vmcs();
@@ -7371,10 +7415,11 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
  */
 static void free_nested(struct vcpu_vmx *vmx)
 {
-       if (!vmx->nested.vmxon)
+       if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
        vmx->nested.vmxon = false;
+       vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = -1ull;
@@ -7979,6 +8024,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
         * "blocked by NMI" bit has to be set before next VM entry.
         */
        if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                       enable_vnmi &&
                        (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                                GUEST_INTR_STATE_NMI);
@@ -8823,6 +8869,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
                return 0;
        }
 
+       if (unlikely(!enable_vnmi &&
+                    vmx->loaded_vmcs->soft_vnmi_blocked)) {
+               if (vmx_interrupt_allowed(vcpu)) {
+                       vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+               } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
+                          vcpu->arch.nmi_pending) {
+                       /*
+                        * This CPU doesn't support us in finding the end of an
+                        * NMI-blocked window if the guest runs with IRQs
+                        * disabled. So we pull the trigger after 1 s of
+                        * futile waiting, but inform the user about this.
+                        */
+                       printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
+                              "state on VCPU %d after 1 s timeout\n",
+                              __func__, vcpu->vcpu_id);
+                       vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+               }
+       }
+
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu);
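
Without hardware virtual-NMI support, the end of an NMI-blocked window can only be detected when an IRQ window opens; if the guest keeps interrupts disabled, the blocked time accumulated in vmx_recover_nmi_blocking() further down eventually exceeds one second and blocking is forced off, with the warning above. A hedged sketch of that bookkeeping, using hypothetical names:

#include <stdbool.h>
#include <stdint.h>

#define SOFT_NMI_TIMEOUT_NS 1000000000LL    /* 1 s, as in the exit handler */

struct soft_vnmi {
        bool    blocked;
        int64_t blocked_ns;    /* guest run time accumulated while blocked */
};

/* Called after each VM exit with the time the guest just ran. */
static void soft_vnmi_account(struct soft_vnmi *s, int64_t ran_ns,
                              bool irq_window_open, bool nmi_pending)
{
        if (!s->blocked)
                return;

        s->blocked_ns += ran_ns;

        if (irq_window_open)
                s->blocked = false;          /* normal end of the window */
        else if (nmi_pending && s->blocked_ns > SOFT_NMI_TIMEOUT_NS)
                s->blocked = false;          /* forced, as a last resort */
}
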
@@ -9105,33 +9170,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
        idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-       if (vmx->loaded_vmcs->nmi_known_unmasked)
-               return;
-       /*
-        * Can't use vmx->exit_intr_info since we're not sure what
-        * the exit reason is.
-        */
-       exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-       unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
-       vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
-       /*
-        * SDM 3: 27.7.1.2 (September 2008)
-        * Re-set bit "block by NMI" before VM entry if vmexit caused by
-        * a guest IRET fault.
-        * SDM 3: 23.2.2 (September 2008)
-        * Bit 12 is undefined in any of the following cases:
-        *  If the VM exit sets the valid bit in the IDT-vectoring
-        *   information field.
-        *  If the VM exit is due to a double fault.
-        */
-       if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
-           vector != DF_VECTOR && !idtv_info_valid)
-               vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                             GUEST_INTR_STATE_NMI);
-       else
-               vmx->loaded_vmcs->nmi_known_unmasked =
-                       !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
-                         & GUEST_INTR_STATE_NMI);
+       if (enable_vnmi) {
+               if (vmx->loaded_vmcs->nmi_known_unmasked)
+                       return;
+               /*
+                * Can't use vmx->exit_intr_info since we're not sure what
+                * the exit reason is.
+                */
+               exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+               unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+               vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+               /*
+                * SDM 3: 27.7.1.2 (September 2008)
+                * Re-set bit "block by NMI" before VM entry if vmexit caused by
+                * a guest IRET fault.
+                * SDM 3: 23.2.2 (September 2008)
+                * Bit 12 is undefined in any of the following cases:
+                *  If the VM exit sets the valid bit in the IDT-vectoring
+                *   information field.
+                *  If the VM exit is due to a double fault.
+                */
+               if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+                   vector != DF_VECTOR && !idtv_info_valid)
+                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                     GUEST_INTR_STATE_NMI);
+               else
+                       vmx->loaded_vmcs->nmi_known_unmasked =
+                               !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+                                 & GUEST_INTR_STATE_NMI);
+       } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
+               vmx->loaded_vmcs->vnmi_blocked_time +=
+                       ktime_to_ns(ktime_sub(ktime_get(),
+                                             vmx->loaded_vmcs->entry_time));
 }
 
 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
@@ -9248,6 +9318,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long debugctlmsr, cr3, cr4;
 
+       /* Record the guest's net vcpu time for enforced NMI injections. */
+       if (unlikely(!enable_vnmi &&
+                    vmx->loaded_vmcs->soft_vnmi_blocked))
+               vmx->loaded_vmcs->entry_time = ktime_get();
+
        /* Don't enter VMX if guest state is invalid, let the exit handler
           start emulation until we arrive back to a valid state */
        if (vmx->emulation_required)
@@ -9727,8 +9802,7 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
        cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
        cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
        cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
-       /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
-       cr4_fixed1_update(bit(11),            ecx, bit(2));
+       cr4_fixed1_update(X86_CR4_UMIP,       ecx, bit(X86_FEATURE_UMIP));
 
 #undef cr4_fixed1_update
 }
@@ -10802,6 +10876,11 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                        return 1;
        }
 
+       if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+               (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+               (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+                       return 1;
+
        return 0;
 }
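
The new post-requirement check rejects a VM entry that would load a malformed GUEST_BNDCFGS: the MPX base must be a canonical address and the reserved low bits must be clear. A self-contained sketch of the same predicate, assuming a 48-bit virtual address width (the kernel uses the vcpu's actual address width) and the usual BNDCFGS layout with the base in bits 63:12:

#include <stdbool.h>
#include <stdint.h>

#define BNDCFGS_RSVD    0x0000000000000ffcULL  /* bits 11:2, assumed   */
#define BNDCFGS_BASE    (~0xfffULL)            /* 4 KiB-aligned base   */

static bool canonical_48(uint64_t addr)
{
        /* sign-extend from bit 47 and compare with the original */
        return (uint64_t)((int64_t)(addr << 16) >> 16) == addr;
}

static bool bndcfgs_valid(uint64_t bndcfgs)
{
        return canonical_48(bndcfgs & BNDCFGS_BASE) &&
               !(bndcfgs & BNDCFGS_RSVD);
}
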
 
@@ -11026,13 +11105,12 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qual;
-
-       if (kvm_event_needs_reinjection(vcpu))
-               return -EBUSY;
+       bool block_nested_events =
+           vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
 
        if (vcpu->arch.exception.pending &&
                nested_vmx_check_exception(vcpu, &exit_qual)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
                vcpu->arch.exception.pending = false;
@@ -11041,14 +11119,14 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 
        if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
            vmx->nested.preemption_timer_expired) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
                return 0;
        }
 
        if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                                  NMI_VECTOR | INTR_TYPE_NMI_INTR |
@@ -11064,7 +11142,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 
        if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
            nested_exit_on_intr(vcpu)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_events)
                        return -EBUSY;
                nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
                return 0;
@@ -11251,6 +11329,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        kvm_clear_interrupt_queue(vcpu);
 }
 
+static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
+                       struct vmcs12 *vmcs12)
+{
+       u32 entry_failure_code;
+
+       nested_ept_uninit_mmu_context(vcpu);
+
+       /*
+        * Only PDPTE load can fail as the value of cr3 was checked on entry and
+        * couldn't have changed.
+        */
+       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+               nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+       if (!enable_ept)
+               vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+}
+
 /*
  * A part of what we need to when the nested L2 guest exits and we want to
  * run its L1 parent, is to reset L1's guest state to the host state specified
@@ -11264,7 +11360,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                                   struct vmcs12 *vmcs12)
 {
        struct kvm_segment seg;
-       u32 entry_failure_code;
 
        if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
                vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -11291,17 +11386,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
        vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
-       nested_ept_uninit_mmu_context(vcpu);
-
-       /*
-        * Only PDPTE load can fail as the value of cr3 was checked on entry and
-        * couldn't have changed.
-        */
-       if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
-               nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
-       if (!enable_ept)
-               vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+       load_vmcs12_mmu_host_state(vcpu, vmcs12);
 
        if (enable_vpid) {
                /*
@@ -11531,6 +11616,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
         * accordingly.
         */
        nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+       load_vmcs12_mmu_host_state(vcpu, vmcs12);
+
        /*
         * The emulated instruction was already skipped in
         * nested_vmx_run, but the updated RIP was never
index 34c85aa2e2d1d40ffc65f461d45b50d8666b5491..eee8e7faf1af5778763b8181df472651d3573149 100644 (file)
@@ -107,6 +107,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
 static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
+static bool __read_mostly report_ignored_msrs = true;
+module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
+
 unsigned int min_timer_period_us = 500;
 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
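
The new report_ignored_msrs parameter gates the "ignored rdmsr/wrmsr" messages changed below. Since it is declared writable, it should be toggleable at runtime through /sys/module/kvm/parameters/report_ignored_msrs (or set at load time with kvm.report_ignored_msrs=0), which helps when a guest repeatedly touches an unimplemented MSR and floods the host log.
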
 
@@ -1795,10 +1798,13 @@ u64 get_kvmclock_ns(struct kvm *kvm)
        /* both __this_cpu_read() and rdtsc() should be on the same cpu */
        get_cpu();
 
-       kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
-                          &hv_clock.tsc_shift,
-                          &hv_clock.tsc_to_system_mul);
-       ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+       if (__this_cpu_read(cpu_tsc_khz)) {
+               kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+                                  &hv_clock.tsc_shift,
+                                  &hv_clock.tsc_to_system_mul);
+               ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+       } else
+               ret = ktime_get_boot_ns() + ka->kvmclock_offset;
 
        put_cpu();
 
@@ -1830,6 +1836,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
         */
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
+       if (guest_hv_clock.version & 1)
+               ++guest_hv_clock.version;  /* first time write, random junk */
+
        vcpu->hv_clock.version = guest_hv_clock.version + 1;
        kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
                                &vcpu->hv_clock,
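
The extra bump guards the first publication of the pvclock page: the guest treats an odd version as "update in flight", so if the stale guest memory happens to contain odd junk, blindly adding one would publish an even version while the structure is still being rewritten. A sketch of the guest-side read protocol this protects (reduced structure, illustrative only):

#include <stdint.h>
#include <string.h>

struct pvclock_sample {
        volatile uint32_t version;
        uint64_t tsc_timestamp;
        uint64_t system_time;
        /* scale/shift fields omitted */
};

/* Retry until an even, unchanged version brackets the copy. */
static void pvclock_read(const struct pvclock_sample *src,
                         struct pvclock_sample *out)
{
        uint32_t before, after;

        do {
                before = src->version;
                memcpy(out, (const void *)src, sizeof(*out));
                after = src->version;
        } while ((before & 1) || before != after);
}
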
@@ -2322,7 +2331,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                /* Drop writes to this legacy MSR -- see rdmsr
                 * counterpart for further detail.
                 */
-               vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
+               if (report_ignored_msrs)
+                       vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
+                               msr, data);
                break;
        case MSR_AMD64_OSVW_ID_LENGTH:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
@@ -2359,8 +2370,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                    msr, data);
                        return 1;
                } else {
-                       vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
-                                   msr, data);
+                       if (report_ignored_msrs)
+                               vcpu_unimpl(vcpu,
+                                       "ignored wrmsr: 0x%x data 0x%llx\n",
+                                       msr, data);
                        break;
                }
        }
@@ -2578,7 +2591,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                               msr_info->index);
                        return 1;
                } else {
-                       vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
+                       if (report_ignored_msrs)
+                               vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
+                                       msr_info->index);
                        msr_info->data = 0;
                }
                break;
@@ -5430,7 +5445,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
-               r = EMULATE_FAIL;
+               r = EMULATE_USER_EXIT;
        }
        kvm_queue_exception(vcpu, UD_VECTOR);
 
@@ -5722,6 +5737,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                        if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
                                                emulation_type))
                                return EMULATE_DONE;
+                       if (ctxt->have_exception && inject_emulated_exception(vcpu))
+                               return EMULATE_DONE;
                        if (emulation_type & EMULTYPE_SKIP)
                                return EMULATE_FAIL;
                        return handle_emulation_failure(vcpu);
@@ -7250,12 +7267,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        struct fpu *fpu = &current->thread.fpu;
        int r;
-       sigset_t sigsaved;
 
        fpu__initialize(fpu);
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
@@ -7298,8 +7313,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 out:
        post_kvm_run_save(vcpu);
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
 
        return r;
 }
index 35625d279458f478b7d14ea47461e8a085924b13..9119d8e41f1ff59e2c8584a36f0f03d000bb1bbe 100644 (file)
@@ -733,11 +733,11 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
  *
  * Returns:
  *
- * A signed 8-bit value containing the default parameters on success.
+ * An int containing ORed-in default parameters on success.
  *
  * -EINVAL on error.
  */
-char insn_get_code_seg_params(struct pt_regs *regs)
+int insn_get_code_seg_params(struct pt_regs *regs)
 {
        struct desc_struct *desc;
        short sel;
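
The prototype change matters because the function can return -EINVAL: plain char has implementation-defined signedness, and on targets where it is unsigned a negative error code stored in char can never be detected by the caller. A tiny illustration of the pitfall:

#include <stdio.h>

static char returns_error_as_char(void)
{
        return -22;    /* -EINVAL */
}

int main(void)
{
        char r = returns_error_as_char();

        if (r < 0)
                printf("error path taken\n");
        else
                printf("error silently lost (char is unsigned here)\n");
        return 0;
}
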
index 12e377184ee4ad0c55d00c3784f08b393764a2bc..c4d55919fac19e06afbb00a4124fbf1b334b4d46 100644 (file)
@@ -896,7 +896,7 @@ EndTable
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
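
Opcode F6 with ModRM reg field 1 is an undocumented alias of the reg-0 TEST Eb,Ib encoding; listing it keeps the in-kernel decoder from flagging such bytes as bad opcodes. As an illustration (encoding worked out by hand here, so treat it as an assumption), the byte sequence f6 c9 01 should decode through this alias as TEST cl, 0x1.
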
index 8ae0000cbdb34d8c6db0efacc566fb3a5b78d2d3..00b296617ca436c3cea79edcbb0a94d034ee52a3 100644 (file)
@@ -158,6 +158,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+       /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
@@ -165,12 +166,16 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        }
 
        if (addr) {
-               addr = ALIGN(addr, huge_page_size(h));
+               addr &= huge_page_mask(h);
+               if (!mmap_address_hint_valid(addr, len))
+                       goto get_unmapped_area;
+
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+               if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
+
+get_unmapped_area:
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
index a9967982684649155cfcdc921d5247c8fbfe70d6..155ecbac9e28f10c2f83cdbf48037a2f8f6a44fe 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/compat.h>
 #include <asm/elf.h>
 
+#include "physaddr.h"
+
 struct va_alignment __read_mostly va_align = {
        .flags = -1,
 };
@@ -174,3 +176,63 @@ const char *arch_vma_name(struct vm_area_struct *vma)
                return "[mpx]";
        return NULL;
 }
+
+/**
+ * mmap_address_hint_valid - Validate the address hint of mmap
+ * @addr:      Address hint
+ * @len:       Mapping length
+ *
+ * Check whether @addr and @addr + @len result in a valid mapping.
+ *
+ * On 32bit this only checks whether @addr + @len is <= TASK_SIZE.
+ *
+ * On 64bit with 5-level page tables another sanity check is required
+ * because mappings requested by mmap(@addr, 0) which cross the 47-bit
+ * virtual address boundary can cause the following theoretical issue:
+ *
+ *  An application calls mmap(addr, 0), i.e. without MAP_FIXED, where @addr
+ *  is below the border of the 47-bit address space and @addr + @len is
+ *  above the border.
+ *
+ *  With 4-level paging this request succeeds, but the resulting mapping
+ *  address will always be within the 47-bit virtual address space, because
+ *  the hint address does not result in a valid mapping and is
+ *  ignored. Hence applications which are not prepared to handle virtual
+ *  addresses above 47-bit work correctly.
+ *
+ *  With 5-level paging this request would be granted and result in a
+ *  mapping which crosses the border of the 47-bit virtual address
+ *  space. If the application cannot handle addresses above 47-bit this
+ *  will lead to misbehaviour and hard to diagnose failures.
+ *
+ * Therefore ignore address hints which would result in a mapping crossing
+ * the 47-bit virtual address boundary.
+ *
+ * Note, that in the same scenario with MAP_FIXED the behaviour is
+ * different. The request with @addr < 47-bit and @addr + @len > 47-bit
+ * fails on a 4-level paging machine but succeeds on a 5-level paging
+ * machine. It is reasonable to expect that an application does not rely on
+ * the failure of such a fixed mapping request, so the restriction is not
+ * applied.
+ */
+bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
+{
+       if (TASK_SIZE - len < addr)
+               return false;
+
+       return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW);
+}
+
+/* Can we access it for direct reading/writing? Must be RAM: */
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+       return addr + count <= __pa(high_memory);
+}
+
+/* Can we access it through mmap? Must be a valid physical address: */
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+       phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
+
+       return phys_addr_valid(addr + count - 1);
+}
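
The helper's rule is simply that a hint must not straddle the legacy 47-bit boundary: either the whole requested range stays at or below DEFAULT_MAP_WINDOW or the whole range lies above it. A standalone sketch with assumed constants (the real DEFAULT_MAP_WINDOW and TASK_SIZE come from the architecture headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAP_WINDOW (1ULL << 47)           /* assumed stand-in for DEFAULT_MAP_WINDOW */
#define TASK_MAX   ((1ULL << 56) - 4096)  /* assumed stand-in for TASK_SIZE          */

static bool hint_valid(uint64_t addr, uint64_t len)
{
        if (TASK_MAX - len < addr)
                return false;
        return (addr > MAP_WINDOW) == (addr + len > MAP_WINDOW);
}

int main(void)
{
        printf("%d\n", hint_valid(MAP_WINDOW - 4096, 8192)); /* 0: crosses the border */
        printf("%d\n", hint_valid(MAP_WINDOW - 8192, 4096)); /* 1: entirely below     */
        printf("%d\n", hint_valid(MAP_WINDOW + 4096, 4096)); /* 1: entirely above     */
        return 0;
}
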
index 1038706edd87f3e142e6023da1c3a130bfaf1330..b8881750a3acd705789050c845c942673da999a8 100644 (file)
@@ -863,9 +863,9 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_rq_timed_out_timer(unsigned long data)
+static void blk_rq_timed_out_timer(struct timer_list *t)
 {
-       struct request_queue *q = (struct request_queue *)data;
+       struct request_queue *q = from_timer(q, t, timeout);
 
        kblockd_schedule_work(&q->timeout_work);
 }
@@ -901,9 +901,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info->name = "block";
        q->node = node_id;
 
-       setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
-                   laptop_mode_timer_fn, (unsigned long) q);
-       setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
+                   laptop_mode_timer_fn, 0);
+       timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
        INIT_WORK(&q->timeout_work, NULL);
        INIT_LIST_HEAD(&q->queue_head);
        INIT_LIST_HEAD(&q->timeout_list);
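
This hunk, like the many driver hunks further down, follows the same mechanical timer-API conversion: the callback takes a struct timer_list pointer instead of a casted unsigned long cookie, and from_timer() recovers the containing object via container_of(). A generic sketch of the pattern with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
        struct timer_list poll_timer;
        unsigned long polls;
};

static void my_dev_poll(struct timer_list *t)
{
        /* from_timer() is container_of() keyed on the timer member. */
        struct my_dev *dev = from_timer(dev, t, poll_timer);

        dev->polls++;
        mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void my_dev_start_polling(struct my_dev *dev)
{
        timer_setup(&dev->poll_timer, my_dev_poll, 0);
        mod_timer(&dev->poll_timer, jiffies + HZ);
}
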
index 3a2f3c96f3672e102d0498848e6086d9ad7184f8..28003bf9941c701ce8d534bbd8c8faaf58066eb9 100644 (file)
@@ -79,9 +79,9 @@ void blk_stat_add(struct request *rq)
        rcu_read_unlock();
 }
 
-static void blk_stat_timer_fn(unsigned long data)
+static void blk_stat_timer_fn(struct timer_list *t)
 {
-       struct blk_stat_callback *cb = (void *)data;
+       struct blk_stat_callback *cb = from_timer(cb, t, timer);
        unsigned int bucket;
        int cpu;
 
@@ -130,7 +130,7 @@ blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
        cb->bucket_fn = bucket_fn;
        cb->data = data;
        cb->buckets = buckets;
-       setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);
+       timer_setup(&cb->timer, blk_stat_timer_fn, 0);
 
        return cb;
 }
index 96ad32623427d4794ad7563369bc9f89bb85fd26..825bc29767e6699ac85675d319a9866b70cc9b84 100644 (file)
@@ -225,7 +225,7 @@ struct throtl_data
        bool track_bio_latency;
 };
 
-static void throtl_pending_timer_fn(unsigned long arg);
+static void throtl_pending_timer_fn(struct timer_list *t);
 
 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 {
@@ -478,8 +478,7 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq)
        INIT_LIST_HEAD(&sq->queued[0]);
        INIT_LIST_HEAD(&sq->queued[1]);
        sq->pending_tree = RB_ROOT;
-       setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
-                   (unsigned long)sq);
+       timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
 }
 
 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
@@ -1249,9 +1248,9 @@ static bool throtl_can_upgrade(struct throtl_data *td,
  * the top-level service_tree is reached, throtl_data->dispatch_work is
  * kicked so that the ready bio's are issued.
  */
-static void throtl_pending_timer_fn(unsigned long arg)
+static void throtl_pending_timer_fn(struct timer_list *t)
 {
-       struct throtl_service_queue *sq = (void *)arg;
+       struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
        struct throtl_grp *tg = sq_to_tg(sq);
        struct throtl_data *td = sq_to_td(sq);
        struct request_queue *q = td->queue;
index 85cea9de324a4f577bcbfb273d8f3a8917ca60e6..358749c38894e31481fb0d903c0d1f7504311aa8 100644 (file)
@@ -1020,6 +1020,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_sendpage);
 
+/**
+ * af_alg_free_resources - release resources required for crypto request
+ */
+void af_alg_free_resources(struct af_alg_async_req *areq)
+{
+       struct sock *sk = areq->sk;
+
+       af_alg_free_areq_sgls(areq);
+       sock_kfree_s(sk, areq, areq->areqlen);
+}
+EXPORT_SYMBOL_GPL(af_alg_free_resources);
+
 /**
  * af_alg_async_cb - AIO callback handler
  *
@@ -1036,18 +1048,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
        struct kiocb *iocb = areq->iocb;
        unsigned int resultlen;
 
-       lock_sock(sk);
-
        /* Buffer size written by crypto operation. */
        resultlen = areq->outlen;
 
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
-       __sock_put(sk);
+       af_alg_free_resources(areq);
+       sock_put(sk);
 
        iocb->ki_complete(iocb, err ? err : resultlen, 0);
-
-       release_sock(sk);
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
index aacae0837aff73507165194a43d75d159d14fbdb..805f485ddf1be4711a9d2ec47998964543f1d217 100644 (file)
@@ -101,10 +101,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
        struct aead_tfm *aeadc = pask->private;
        struct crypto_aead *tfm = aeadc->aead;
        struct crypto_skcipher *null_tfm = aeadc->null_tfm;
-       unsigned int as = crypto_aead_authsize(tfm);
+       unsigned int i, as = crypto_aead_authsize(tfm);
        struct af_alg_async_req *areq;
-       struct af_alg_tsgl *tsgl;
-       struct scatterlist *src;
+       struct af_alg_tsgl *tsgl, *tmp;
+       struct scatterlist *rsgl_src, *tsgl_src = NULL;
        int err = 0;
        size_t used = 0;                /* [in]  TX bufs to be en/decrypted */
        size_t outlen = 0;              /* [out] RX bufs produced by kernel */
@@ -178,7 +178,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
        }
 
        processed = used + ctx->aead_assoclen;
-       tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
+       list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+               for (i = 0; i < tsgl->cur; i++) {
+                       struct scatterlist *process_sg = tsgl->sg + i;
+
+                       if (!(process_sg->length) || !sg_page(process_sg))
+                               continue;
+                       tsgl_src = process_sg;
+                       break;
+               }
+               if (tsgl_src)
+                       break;
+       }
+       if (processed && !tsgl_src) {
+               err = -EFAULT;
+               goto free;
+       }
 
        /*
         * Copy of AAD from source to destination
@@ -194,7 +209,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
         */
 
        /* Use the RX SGL as source (and destination) for crypto op. */
-       src = areq->first_rsgl.sgl.sg;
+       rsgl_src = areq->first_rsgl.sgl.sg;
 
        if (ctx->enc) {
                /*
@@ -207,7 +222,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                 *          v      v
                 * RX SGL: AAD || PT || Tag
                 */
-               err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+               err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                           areq->first_rsgl.sgl.sg, processed);
                if (err)
                        goto free;
@@ -225,7 +240,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                 */
 
                 /* Copy AAD || CT to RX SGL buffer for in-place operation. */
-               err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+               err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
                                           areq->first_rsgl.sgl.sg, outlen);
                if (err)
                        goto free;
@@ -257,23 +272,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                                 areq->tsgl);
                } else
                        /* no RX SGL present (e.g. authentication only) */
-                       src = areq->tsgl;
+                       rsgl_src = areq->tsgl;
        }
 
        /* Initialize the crypto operation */
-       aead_request_set_crypt(&areq->cra_u.aead_req, src,
+       aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
                               areq->first_rsgl.sgl.sg, used, ctx->iv);
        aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
        aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
 
        if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
                /* AIO operation */
+               sock_hold(sk);
                areq->iocb = msg->msg_iocb;
                aead_request_set_callback(&areq->cra_u.aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          af_alg_async_cb, areq);
                err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
                                 crypto_aead_decrypt(&areq->cra_u.aead_req);
+
+               /* AIO operation in progress */
+               if (err == -EINPROGRESS || err == -EBUSY) {
+                       /* Remember output size that will be generated. */
+                       areq->outlen = outlen;
+
+                       return -EIOCBQUEUED;
+               }
+
+               sock_put(sk);
        } else {
                /* Synchronous operation */
                aead_request_set_callback(&areq->cra_u.aead_req,
@@ -285,19 +311,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                                &ctx->wait);
        }
 
-       /* AIO operation in progress */
-       if (err == -EINPROGRESS) {
-               sock_hold(sk);
-
-               /* Remember output size that will be generated. */
-               areq->outlen = outlen;
-
-               return -EIOCBQUEUED;
-       }
 
 free:
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
+       af_alg_free_resources(areq);
 
        return err ? err : outlen;
 }
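
The AIO path now takes its socket reference before the request is submitted and treats both -EINPROGRESS and -EBUSY (possible with CRYPTO_TFM_REQ_MAY_BACKLOG) as "still in flight", returning -EIOCBQUEUED and leaving cleanup to af_alg_async_cb(); on an immediate synchronous return the reference is dropped again and the common path releases the request through the new af_alg_free_resources() helper. The algif_skcipher hunk below applies the same pattern.
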
index 9954b078f0b9cc7ccd853650f85920e29f159973..30cff827dd8fff048fa3e2ca7de770ab73022749 100644 (file)
@@ -117,6 +117,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 
        if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
                /* AIO operation */
+               sock_hold(sk);
                areq->iocb = msg->msg_iocb;
                skcipher_request_set_callback(&areq->cra_u.skcipher_req,
                                              CRYPTO_TFM_REQ_MAY_SLEEP,
@@ -124,6 +125,16 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                err = ctx->enc ?
                        crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
                        crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+
+               /* AIO operation in progress */
+               if (err == -EINPROGRESS || err == -EBUSY) {
+                       /* Remember output size that will be generated. */
+                       areq->outlen = len;
+
+                       return -EIOCBQUEUED;
+               }
+
+               sock_put(sk);
        } else {
                /* Synchronous operation */
                skcipher_request_set_callback(&areq->cra_u.skcipher_req,
@@ -136,19 +147,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                                                 &ctx->wait);
        }
 
-       /* AIO operation in progress */
-       if (err == -EINPROGRESS) {
-               sock_hold(sk);
-
-               /* Remember output size that will be generated. */
-               areq->outlen = len;
-
-               return -EIOCBQUEUED;
-       }
 
 free:
-       af_alg_free_areq_sgls(areq);
-       sock_kfree_s(sk, areq, areq->areqlen);
+       af_alg_free_resources(areq);
 
        return err ? err : len;
 }
index 1063b644efcdb3b2cbde33483207b7fc06e4eb5b..e284d9cb9237bfff0e86ebc7370c35fb7b58fee4 100644 (file)
@@ -19,6 +19,7 @@
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("PKCS#7 testing key type");
+MODULE_AUTHOR("Red Hat, Inc.");
 
 static unsigned pkcs7_usage;
 module_param_named(usage, pkcs7_usage, uint, S_IWUSR | S_IRUGO);
index d140d8bb2c96140c408b1e3450f288e562372743..c1ca1e86f5c4f86d1110343aa5194e2c3147f4d5 100644 (file)
@@ -11,6 +11,7 @@
 
 #define pr_fmt(fmt) "PKCS7: "fmt
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include "pkcs7_parser.h"
 #include "pkcs7-asn1.h"
 
+MODULE_DESCRIPTION("PKCS#7 parser");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
 struct pkcs7_parse_context {
        struct pkcs7_message    *msg;           /* Message being constructed */
        struct pkcs7_signed_info *sinfo;        /* SignedInfo being constructed */
index d916235d6cf512093bd2027c1f6f633775c3f19c..bc3035ef27a22b3ca593532dedc8020bb2773a2a 100644 (file)
@@ -22,6 +22,8 @@
 #include <crypto/public_key.h>
 #include <crypto/akcipher.h>
 
+MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
+MODULE_AUTHOR("Red Hat, Inc.");
 MODULE_LICENSE("GPL");
 
 /*
index eea71dc9686c29fd2c3fe07d2c9fc0b213bc99b7..c9013582c026748a10b09eeb09d5c71f0571ea7a 100644 (file)
@@ -275,4 +275,5 @@ module_init(x509_key_init);
 module_exit(x509_key_exit);
 
 MODULE_DESCRIPTION("X.509 certificate parser");
+MODULE_AUTHOR("Red Hat, Inc.");
 MODULE_LICENSE("GPL");
index d5692e35fab1f069376f7c54358ff5e5f0cb352e..778e0ff42bfa801eda5be848da9e6747ebbc2626 100644 (file)
@@ -522,6 +522,9 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
 
+       scatterwalk_done(&walk->in, 0, walk->total);
+       scatterwalk_done(&walk->out, 0, walk->total);
+
        walk->iv = req->iv;
        walk->oiv = req->iv;
 
index acf16c323e385d68ba01aa7bec0b9c7aaba32ddd..9287ec958b7095f0385c33c095ed14ac9feba445 100644 (file)
@@ -293,7 +293,7 @@ static inline void __init show_version (void) {
   
 */
 
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
 /********** globals **********/
 
 static unsigned short debug = 0;
@@ -1493,8 +1493,8 @@ static const struct atmdev_ops amb_ops = {
 };
 
 /********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
-  amb_dev * dev = (amb_dev *) arg;
+static void do_housekeeping (struct timer_list *t) {
+  amb_dev * dev = from_timer(dev, t, housekeeping);
   
   // could collect device-specific (not driver/atm-linux) stats here
       
@@ -2258,7 +2258,7 @@ static int amb_probe(struct pci_dev *pci_dev,
 
        PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
                dev->atm_dev->number, dev, dev->atm_dev);
-               dev->atm_dev->dev_data = (void *) dev;
+       dev->atm_dev->dev_data = (void *) dev;
 
        // register our address
        amb_esi (dev, dev->atm_dev->esi);
@@ -2267,8 +2267,7 @@ static int amb_probe(struct pci_dev *pci_dev,
        dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
        dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
 
-       setup_timer(&dev->housekeeping, do_housekeeping,
-                   (unsigned long)dev);
+       timer_setup(&dev->housekeeping, do_housekeeping, 0);
        mod_timer(&dev->housekeeping, jiffies);
 
        // enable host interrupts
index 6b6368a565261b5037d5d5f83899a9d664671222..d97c05690faa99363ac709fb237335a0678af4c0 100644 (file)
@@ -1656,9 +1656,9 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
 
 
 #ifdef FS_POLL_FREQ
-static void fs_poll (unsigned long data)
+static void fs_poll (struct timer_list *t)
 {
-       struct fs_dev *dev = (struct fs_dev *) data;
+       struct fs_dev *dev = from_timer(dev, t, timer);
   
        fs_irq (0, dev);
        dev->timer.expires = jiffies + FS_POLL_FREQ;
@@ -1885,9 +1885,7 @@ static int fs_init(struct fs_dev *dev)
        }
 
 #ifdef FS_POLL_FREQ
-       init_timer (&dev->timer);
-       dev->timer.data = (unsigned long) dev;
-       dev->timer.function = fs_poll;
+       timer_setup(&dev->timer, fs_poll, 0);
        dev->timer.expires = jiffies + FS_POLL_FREQ;
        add_timer (&dev->timer);
 #endif
index 126855e6cb7d272629b097032291a849b94ef5f0..6ebc4e4820fc4b267351970047e29a7c700902cc 100644 (file)
@@ -3083,8 +3083,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
            ASSERT(fore200e_vcc);
 
            len = sprintf(page,
-                         "  %08x  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
-                         (u32)(unsigned long)vcc,
+                         "  %pK  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
+                         vcc,
                          vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
                          fore200e_vcc->tx_pdu,
                          fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
index e121b84857310836742690c001dec6502dc68db2..5ddc203206b8fb739b2f2552e67d17d2545dfe02 100644 (file)
@@ -357,7 +357,7 @@ static inline void __init show_version (void) {
 
 /********** globals **********/
 
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
 
 static unsigned short debug = 0;
 static unsigned short vpi_bits = 0;
@@ -1418,9 +1418,9 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
 
 /********** housekeeping **********/
 
-static void do_housekeeping (unsigned long arg) {
+static void do_housekeeping (struct timer_list *t) {
   // just stats at the moment
-  hrz_dev * dev = (hrz_dev *) arg;
+  hrz_dev * dev = from_timer(dev, t, housekeeping);
 
   // collect device-specific (not driver/atm-linux) stats here
   dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
@@ -2796,7 +2796,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
        dev->atm_dev->ci_range.vpi_bits = vpi_bits;
        dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
 
-       setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev);
+       timer_setup(&dev->housekeeping, do_housekeeping, 0);
        mod_timer(&dev->housekeeping, jiffies);
 
 out:
index 909744eb7bab419eec2dc71e2c79c87231812ce3..0a67487c0b1d3f0fbc85215eeb9d733966482b53 100644 (file)
@@ -45,8 +45,8 @@ static DEFINE_SPINLOCK(idt77105_priv_lock);
 #define PUT(val,reg) dev->ops->phy_put(dev,val,IDT77105_##reg)
 #define GET(reg) dev->ops->phy_get(dev,IDT77105_##reg)
 
-static void idt77105_stats_timer_func(unsigned long);
-static void idt77105_restart_timer_func(unsigned long);
+static void idt77105_stats_timer_func(struct timer_list *);
+static void idt77105_restart_timer_func(struct timer_list *);
 
 
 static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func);
@@ -80,7 +80,7 @@ static u16 get_counter(struct atm_dev *dev, int counter)
  * a separate copy of the stats allows implementation of
  * an ioctl which gathers the stats *without* zero'ing them.
  */
-static void idt77105_stats_timer_func(unsigned long dummy)
+static void idt77105_stats_timer_func(struct timer_list *unused)
 {
        struct idt77105_priv *walk;
        struct atm_dev *dev;
@@ -109,7 +109,7 @@ static void idt77105_stats_timer_func(unsigned long dummy)
  * interrupts need to be disabled when the cable is pulled out
  * to avoid lots of spurious cell error interrupts.
  */
-static void idt77105_restart_timer_func(unsigned long dummy)
+static void idt77105_restart_timer_func(struct timer_list *unused)
 {
        struct idt77105_priv *walk;
        struct atm_dev *dev;
index 0e3b9c44c8089c3d82ad2d518ce0c280834bb86a..0277f36be85b94479c5aa303e2fc25b258451586 100644 (file)
@@ -1528,9 +1528,9 @@ idt77252_tx(struct idt77252_dev *card)
 
 
 static void
-tst_timer(unsigned long data)
+tst_timer(struct timer_list *t)
 {
-       struct idt77252_dev *card = (struct idt77252_dev *)data;
+       struct idt77252_dev *card = from_timer(card, t, tst_timer);
        unsigned long base, idle, jump;
        unsigned long flags;
        u32 pc;
@@ -3634,7 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
        spin_lock_init(&card->cmd_lock);
        spin_lock_init(&card->tst_lock);
 
-       setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
+       timer_setup(&card->tst_timer, tst_timer, 0);
 
        /* Do the I/O remapping... */
        card->membase = ioremap(membase, 1024);
index 12f646760b6827e3ddf8a63290cfb22a08e51d0b..98a3a43484c8b410f3a217cd8cb11bd8d6750370 100644 (file)
@@ -75,7 +75,7 @@ static void desc_dbg(IADEV *iadev);
 static IADEV *ia_dev[8];
 static struct atm_dev *_ia_dev[8];
 static int iadev_count;
-static void ia_led_timer(unsigned long arg);
+static void ia_led_timer(struct timer_list *unused);
 static DEFINE_TIMER(ia_timer, ia_led_timer);
 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
@@ -2432,7 +2432,7 @@ static void ia_update_stats(IADEV *iadev) {
     return;
 }
   
-static void ia_led_timer(unsigned long arg) {
+static void ia_led_timer(struct timer_list *unused) {
        unsigned long flags;
        static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
         u_char i;
index 2351dad78ff58aec2d714059c73da094bb723641..5f8e009b2da1cb02d71fa608a2f13cdcb39efd51 100644 (file)
@@ -1586,8 +1586,8 @@ static int service_buffer_allocate(struct lanai_dev *lanai)
            lanai->pci);
        if (unlikely(lanai->service.start == NULL))
                return -ENOMEM;
-       DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n",
-           (unsigned long) lanai->service.start,
+       DPRINTK("allocated service buffer at %p, size %zu(%d)\n",
+           lanai->service.start,
            lanai_buf_size(&lanai->service),
            lanai_buf_size_cardorder(&lanai->service));
        /* Clear ServWrite register to be safe */
@@ -1761,9 +1761,9 @@ static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
 }
 #endif /* !DEBUG_RW */
 
-static void lanai_timed_poll(unsigned long arg)
+static void lanai_timed_poll(struct timer_list *t)
 {
-       struct lanai_dev *lanai = (struct lanai_dev *) arg;
+       struct lanai_dev *lanai = from_timer(lanai, t, timer);
 #ifndef DEBUG_RW
        unsigned long flags;
 #ifdef USE_POWERDOWN
@@ -1790,10 +1790,8 @@ static void lanai_timed_poll(unsigned long arg)
 
 static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
 {
-       init_timer(&lanai->timer);
+       timer_setup(&lanai->timer, lanai_timed_poll, 0);
        lanai->timer.expires = jiffies + LANAI_POLL_PERIOD;
-       lanai->timer.data = (unsigned long) lanai;
-       lanai->timer.function = lanai_timed_poll;
        add_timer(&lanai->timer);
 }
 
@@ -2220,9 +2218,9 @@ static int lanai_dev_open(struct atm_dev *atmdev)
 #endif
        memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
        lanai_timed_poll_start(lanai);
-       printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
+       printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=%p, irq=%u "
                "(%pMF)\n", lanai->number, (int) lanai->pci->revision,
-               (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
+               lanai->base, lanai->pci->irq, atmdev->esi);
        printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
            "board_rev=%d\n", lanai->number,
            lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
index a9702836cbaeb10b233c6e1b7b818e527f2d1542..cbec9adc01c768e95cf8a3ad000697019f38f65c 100644 (file)
@@ -145,7 +145,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
 #ifdef EXTRA_DEBUG
 static void which_list(ns_dev * card, struct sk_buff *skb);
 #endif
-static void ns_poll(unsigned long arg);
+static void ns_poll(struct timer_list *unused);
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
                       unsigned long addr);
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -284,10 +284,8 @@ static int __init nicstar_init(void)
        XPRINTK("nicstar: nicstar_init() returned.\n");
 
        if (!error) {
-               init_timer(&ns_timer);
+               timer_setup(&ns_timer, ns_poll, 0);
                ns_timer.expires = jiffies + NS_POLL_PERIOD;
-               ns_timer.data = 0UL;
-               ns_timer.function = ns_poll;
                add_timer(&ns_timer);
        }
 
@@ -2681,7 +2679,7 @@ static void which_list(ns_dev * card, struct sk_buff *skb)
 }
 #endif /* EXTRA_DEBUG */
 
-static void ns_poll(unsigned long arg)
+static void ns_poll(struct timer_list *unused)
 {
        int i;
        ns_dev *card;
index b8825f2d79e0205631e13f9c4a38605d6a952ca5..4b044710a8cf360a5682f0eee49eb14e58086d1e 100644 (file)
@@ -177,7 +177,7 @@ static int set_loopback(struct atm_dev *dev,int mode)
                default:
                        return -EINVAL;
        }
-        dev->ops->phy_put(dev, control, reg);
+       dev->ops->phy_put(dev, control, reg);
        PRIV(dev)->loop_mode = mode;
        return 0;
 }
index d7d21118d3e0f5fc23bad06ccbaaf5c2b409e9e1..2c2ed9cf879626692706bdf86f447cb4d8c05c26 100644 (file)
@@ -136,6 +136,7 @@ config CFAG12864B_RATE
 
 config IMG_ASCII_LCD
        tristate "Imagination Technologies ASCII LCD Display"
+       depends on HAS_IOMEM
        default y if MIPS_MALTA || MIPS_SEAD3
        select SYSCON
        help
index 680ee1d36ac9a7f31b182b977f062aed1a590c8b..38559f04db2cfdf2ad9d71b562ce6825317aa8e1 100644 (file)
@@ -481,7 +481,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
         * Use timer struct to check if the given source is initialized
         * by wakeup_source_add.
         */
-       return ws->timer.function != (TIMER_FUNC_TYPE)pm_wakeup_timer_fn;
+       return ws->timer.function != pm_wakeup_timer_fn;
 }
 
 /*
index 255591ab37168dd8d6f5c9780ce61e339be81810..442e777bdfb2ba84449c3c273997be71d36ffba2 100644 (file)
@@ -3079,11 +3079,10 @@ DAC960_InitializeController(DAC960_Controller_T *Controller)
       /*
        Initialize the Monitoring Timer.
       */
-      init_timer(&Controller->MonitoringTimer);
+      timer_setup(&Controller->MonitoringTimer,
+                  DAC960_MonitoringTimerFunction, 0);
       Controller->MonitoringTimer.expires =
        jiffies + DAC960_MonitoringTimerInterval;
-      Controller->MonitoringTimer.data = (unsigned long) Controller;
-      Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction;
       add_timer(&Controller->MonitoringTimer);
       Controller->ControllerInitialized = true;
       return true;
@@ -5620,9 +5619,9 @@ static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
   the status of DAC960 Controllers.
 */
 
-static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
+static void DAC960_MonitoringTimerFunction(struct timer_list *t)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData;
+  DAC960_Controller_T *Controller = from_timer(Controller, t, MonitoringTimer);
   DAC960_Command_T *Command;
   unsigned long flags;
 
index 85fa9bb6375964f04bedcc644d7a8185518c69b7..6a6226a2b9320eb275839962a048b530bcc3fd29 100644 (file)
@@ -4406,7 +4406,7 @@ static irqreturn_t DAC960_PD_InterruptHandler(int, void *);
 static irqreturn_t DAC960_P_InterruptHandler(int, void *);
 static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
 static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_MonitoringTimerFunction(unsigned long);
+static void DAC960_MonitoringTimerFunction(struct timer_list *);
 static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
                           DAC960_Controller_T *, ...);
 static void DAC960_CreateProcEntries(DAC960_Controller_T *);
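[Editorial aside] Where the old callback recovered its context from the .data cookie cast, the converted DAC960 code above uses from_timer(), a container_of() wrapper keyed on the timer's field name. A short sketch of the general pattern, with placeholder struct and field names:

#include <linux/timer.h>

struct demo_ctrl {                              /* hypothetical controller state */
        struct timer_list monitor_timer;
        /* ... other fields ... */
};

static void demo_monitor(struct timer_list *t)
{
        /* recover the enclosing structure from the embedded timer */
        struct demo_ctrl *ctrl = from_timer(ctrl, t, monitor_timer);

        /* ... poll hardware via ctrl ... */
        mod_timer(&ctrl->monitor_timer, jiffies + HZ);
}

static void demo_start(struct demo_ctrl *ctrl)
{
        timer_setup(&ctrl->monitor_timer, demo_monitor, 0);
        mod_timer(&ctrl->monitor_timer, jiffies + HZ);
}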
index 55ab25f79a08991536359206dad387f2bfc7e52d..812fed069708f181c4b16ce972e0919c4541a803 100644 (file)
@@ -1429,7 +1429,7 @@ aoecmd_ata_id(struct aoedev *d)
 
        d->rttavg = RTTAVG_INIT;
        d->rttdev = RTTDEV_INIT;
-       d->timer.function = (TIMER_FUNC_TYPE)rexmit_timer;
+       d->timer.function = rexmit_timer;
 
        skb = skb_clone(skb, GFP_ATOMIC);
        if (skb) {
index ae596e55bcb67c18594477b7f8b27ec02d82fcc6..8bc3b9fd8dd2be0df64d166dbc4c2b65eef03305 100644 (file)
@@ -342,8 +342,8 @@ static int NeedSeek = 0;
 static void fd_select_side( int side );
 static void fd_select_drive( int drive );
 static void fd_deselect( void );
-static void fd_motor_off_timer( unsigned long dummy );
-static void check_change( unsigned long dummy );
+static void fd_motor_off_timer(struct timer_list *unused);
+static void check_change(struct timer_list *unused);
 static irqreturn_t floppy_irq (int irq, void *dummy);
 static void fd_error( void );
 static int do_format(int drive, int type, struct atari_format_descr *desc);
@@ -353,12 +353,12 @@ static void fd_calibrate_done( int status );
 static void fd_seek( void );
 static void fd_seek_done( int status );
 static void fd_rwsec( void );
-static void fd_readtrack_check( unsigned long dummy );
+static void fd_readtrack_check(struct timer_list *unused);
 static void fd_rwsec_done( int status );
 static void fd_rwsec_done1(int status);
 static void fd_writetrack( void );
 static void fd_writetrack_done( int status );
-static void fd_times_out( unsigned long dummy );
+static void fd_times_out(struct timer_list *unused);
 static void finish_fdc( void );
 static void finish_fdc_done( int dummy );
 static void setup_req_params( int drive );
@@ -479,7 +479,7 @@ static void fd_deselect( void )
  * counts the index signals, which arrive only if one drive is selected.
  */
 
-static void fd_motor_off_timer( unsigned long dummy )
+static void fd_motor_off_timer(struct timer_list *unused)
 {
        unsigned char status;
 
@@ -515,7 +515,7 @@ static void fd_motor_off_timer( unsigned long dummy )
  * as possible) and keep track of the current state of the write protection.
  */
 
-static void check_change( unsigned long dummy )
+static void check_change(struct timer_list *unused)
 {
        static int    drive = 0;
 
@@ -966,7 +966,7 @@ static void fd_rwsec( void )
 }
 
     
-static void fd_readtrack_check( unsigned long dummy )
+static void fd_readtrack_check(struct timer_list *unused)
 {
        unsigned long flags, addr, addr2;
 
@@ -1237,7 +1237,7 @@ static void fd_writetrack_done( int status )
        fd_error();
 }
 
-static void fd_times_out( unsigned long dummy )
+static void fd_times_out(struct timer_list *unused)
 {
        atari_disable_irq( IRQ_MFP_FDC );
        if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
index adc877dfef5c2c65ff572e864ef2ef19ad5f5f24..38fc5f397fdede04ebaf4f3f9c2e89118ee6b235 100644 (file)
@@ -348,7 +348,6 @@ struct rbd_client_id {
 struct rbd_mapping {
        u64                     size;
        u64                     features;
-       bool                    read_only;
 };
 
 /*
@@ -450,12 +449,11 @@ static DEFINE_IDA(rbd_dev_id_ida);
 static struct workqueue_struct *rbd_wq;
 
 /*
- * Default to false for now, as single-major requires >= 0.75 version of
- * userspace rbd utility.
+ * single-major requires >= 0.75 version of userspace rbd utility.
  */
-static bool single_major = false;
+static bool single_major = true;
 module_param(single_major, bool, S_IRUGO);
-MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
+MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
 
 static int rbd_img_request_submit(struct rbd_img_request *img_request);
 
@@ -608,9 +606,6 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;
 
-       if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
-               return -EROFS;
-
        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
@@ -640,46 +635,24 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
 
 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
 {
-       int ret = 0;
-       int val;
-       bool ro;
-       bool ro_changed = false;
+       int ro;
 
-       /* get_user() may sleep, so call it before taking rbd_dev->lock */
-       if (get_user(val, (int __user *)(arg)))
+       if (get_user(ro, (int __user *)arg))
                return -EFAULT;
 
-       ro = val ? true : false;
-       /* Snapshot doesn't allow to write*/
+       /* Snapshots can't be marked read-write */
        if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
                return -EROFS;
 
-       spin_lock_irq(&rbd_dev->lock);
-       /* prevent others open this device */
-       if (rbd_dev->open_count > 1) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       if (rbd_dev->mapping.read_only != ro) {
-               rbd_dev->mapping.read_only = ro;
-               ro_changed = true;
-       }
-
-out:
-       spin_unlock_irq(&rbd_dev->lock);
-       /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
-       if (ret == 0 && ro_changed)
-               set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
-
-       return ret;
+       /* Let blkdev_roset() handle it */
+       return -ENOTTY;
 }
 
 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
 {
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
-       int ret = 0;
+       int ret;
 
        switch (cmd) {
        case BLKROSET:
@@ -4050,15 +4023,8 @@ static void rbd_queue_workfn(struct work_struct *work)
                goto err_rq;
        }
 
-       /* Only reads are allowed to a read-only device */
-
-       if (op_type != OBJ_OP_READ) {
-               if (rbd_dev->mapping.read_only) {
-                       result = -EROFS;
-                       goto err_rq;
-               }
-               rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
-       }
+       rbd_assert(op_type == OBJ_OP_READ ||
+                  rbd_dev->spec->snap_id == CEPH_NOSNAP);
 
        /*
         * Quit early if the mapped snapshot no longer exists.  It's
@@ -4423,7 +4389,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        /* enable the discard support */
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        q->limits.discard_granularity = segment_size;
-       q->limits.discard_alignment = segment_size;
        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
        blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
 
@@ -5994,7 +5959,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
                goto err_out_disk;
 
        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
-       set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
+       set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
 
        ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
        if (ret)
@@ -6145,7 +6110,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        struct rbd_options *rbd_opts = NULL;
        struct rbd_spec *spec = NULL;
        struct rbd_client *rbdc;
-       bool read_only;
        int rc;
 
        if (!try_module_get(THIS_MODULE))
@@ -6194,11 +6158,8 @@ static ssize_t do_rbd_add(struct bus_type *bus,
        }
 
        /* If we are mapping a snapshot it must be marked read-only */
-
-       read_only = rbd_dev->opts->read_only;
        if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
-               read_only = true;
-       rbd_dev->mapping.read_only = read_only;
+               rbd_dev->opts->read_only = true;
 
        rc = rbd_dev_device_setup(rbd_dev);
        if (rc)
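[Editorial aside] The net effect of the rbd hunks above is that per-mapping read-only state is no longer tracked by the driver: the ioctl path only vetoes invalid transitions (a mapped snapshot may not be made writable) and then defers to the block layer, which handles BLKROSET and set_disk_ro() itself. A rough sketch of that division of labour, using a hypothetical helper name:

#include <linux/blkdev.h>
#include <linux/uaccess.h>

/* sketch: the driver validates, the block core performs the ro flip */
static int demo_ioctl_set_ro(struct block_device *bdev, unsigned long arg)
{
        int ro;

        if (get_user(ro, (int __user *)arg))
                return -EFAULT;

        if (demo_is_snapshot(bdev) && !ro)      /* demo_is_snapshot(): illustrative */
                return -EROFS;                  /* snapshots must stay read-only */

        return -ENOTTY;         /* let blkdev_roset() in the block core handle it */
}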
index 926dce9c452faf927d79fb0b99fd1a4e473aea6e..c148e83e4ed72b0b430853c779ce835f62bc310f 100644 (file)
@@ -203,9 +203,9 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
        return 0;
 }
 
-static void creg_cmd_timed_out(unsigned long data)
+static void creg_cmd_timed_out(struct timer_list *t)
 {
-       struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+       struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
        struct creg_cmd *cmd;
 
        spin_lock(&card->creg_ctrl.lock);
@@ -745,8 +745,7 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
        mutex_init(&card->creg_ctrl.reset_lock);
        INIT_LIST_HEAD(&card->creg_ctrl.queue);
        spin_lock_init(&card->creg_ctrl.lock);
-       setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
-                   (unsigned long) card);
+       timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
 
        return 0;
 }
index 6a1b2177951c1521f50b9364739bd433a16a577e..beaccf197a5a85f41eaf1798862f32ce3eb0cd06 100644 (file)
@@ -354,9 +354,9 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
                rsxx_complete_dma(ctrl, dma, status);
 }
 
-static void dma_engine_stalled(unsigned long data)
+static void dma_engine_stalled(struct timer_list *t)
 {
-       struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+       struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
        int cnt;
 
        if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
@@ -838,8 +838,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
        mutex_init(&ctrl->work_lock);
        INIT_LIST_HEAD(&ctrl->queue);
 
-       setup_timer(&ctrl->activity_timer, dma_engine_stalled,
-                                       (unsigned long)ctrl);
+       timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
 
        ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
        if (!ctrl->issue_wq)
index 2819f23e8bf2fe8f18cac9ccae63a8e298adc73a..de0d08133c7ee071e0ce44167fda6e8a0e1d9c51 100644 (file)
@@ -707,9 +707,9 @@ static void skd_start_queue(struct work_struct *work)
        blk_mq_start_hw_queues(skdev->queue);
 }
 
-static void skd_timer_tick(ulong arg)
+static void skd_timer_tick(struct timer_list *t)
 {
-       struct skd_device *skdev = (struct skd_device *)arg;
+       struct skd_device *skdev = from_timer(skdev, t, timer);
        unsigned long reqflags;
        u32 state;
 
@@ -857,7 +857,7 @@ static int skd_start_timer(struct skd_device *skdev)
 {
        int rc;
 
-       setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+       timer_setup(&skdev->timer, skd_timer_tick, 0);
 
        rc = mod_timer(&skdev->timer, (jiffies + HZ));
        if (rc)
index ad9749463d4fa9a382afa7f24587bbbe3a2efcc9..5ca56bfae63cf69872cd18270fb8c980db4eddae 100644 (file)
@@ -81,7 +81,7 @@ struct vdc_port {
 
 static void vdc_ldc_reset(struct vdc_port *port);
 static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(unsigned long _arg);
+static void vdc_ldc_reset_timer(struct timer_list *t);
 
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 {
@@ -974,8 +974,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
         */
        ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
        port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
-       setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
-                   (unsigned long)port);
+       timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
        INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
 
        err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
@@ -1087,9 +1086,9 @@ static void vdc_queue_drain(struct vdc_port *port)
                __blk_end_request_all(req, BLK_STS_IOERR);
 }
 
-static void vdc_ldc_reset_timer(unsigned long _arg)
+static void vdc_ldc_reset_timer(struct timer_list *t)
 {
-       struct vdc_port *port = (struct vdc_port *) _arg;
+       struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
        struct vio_driver_state *vio = &port->vio;
        unsigned long flags;
 
index e620e423102b89f05fef575716b1e1657363d6f0..af51015d056eff1a6b1a2dfac264e30259c669d9 100644 (file)
@@ -397,7 +397,7 @@ static void set_timeout(struct floppy_state *fs, int nticks,
        if (fs->timeout_pending)
                del_timer(&fs->timeout);
        fs->timeout.expires = jiffies + nticks;
-       fs->timeout.function = (TIMER_FUNC_TYPE)proc;
+       fs->timeout.function = proc;
        add_timer(&fs->timeout);
        fs->timeout_pending = 1;
 }
index 0677d2514665c75c3c45c27b827b01d6f045fc07..8077123678ad8b27fb7807f230ca76c3b0a37e01 100644 (file)
@@ -718,7 +718,7 @@ static void check_batteries(struct cardinfo *card)
                set_fault_to_battery_status(card);
 }
 
-static void check_all_batteries(unsigned long ptr)
+static void check_all_batteries(struct timer_list *unused)
 {
        int i;
 
@@ -738,8 +738,7 @@ static void check_all_batteries(unsigned long ptr)
 
 static void init_battery_timer(void)
 {
-       init_timer(&battery_timer);
-       battery_timer.function = check_all_batteries;
+       timer_setup(&battery_timer, check_all_batteries, 0);
        battery_timer.expires = jiffies + (HZ * 60);
        add_timer(&battery_timer);
 }
index 14459d66ef0cd8ac223992f2f69ad2b725f8481c..c24589414c75926b934b9bb117b237bcb686e736 100644 (file)
@@ -770,9 +770,9 @@ static void ace_fsm_tasklet(unsigned long data)
        spin_unlock_irqrestore(&ace->lock, flags);
 }
 
-static void ace_stall_timer(unsigned long data)
+static void ace_stall_timer(struct timer_list *t)
 {
-       struct ace_device *ace = (void *)data;
+       struct ace_device *ace = from_timer(ace, t, stall_timer);
        unsigned long flags;
 
        dev_warn(ace->dev,
@@ -984,7 +984,7 @@ static int ace_setup(struct ace_device *ace)
         * Initialize the state machine tasklet and stall timer
         */
        tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
-       setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+       timer_setup(&ace->stall_timer, ace_stall_timer, 0);
 
        /*
         * Initialize the request queue
index 1a0385ed64171c2e62cc16c36b9a546a9bc24ed1..839ee61d352a218321ffedf3de339c51d02fbf74 100644 (file)
@@ -74,7 +74,7 @@
 #endif                         /* TRACING */
 
 static DEFINE_MUTEX(dtlk_mutex);
-static void dtlk_timer_tick(unsigned long data);
+static void dtlk_timer_tick(struct timer_list *unused);
 
 static int dtlk_major;
 static int dtlk_port_lpc;
@@ -259,7 +259,7 @@ static unsigned int dtlk_poll(struct file *file, poll_table * wait)
        return mask;
 }
 
-static void dtlk_timer_tick(unsigned long data)
+static void dtlk_timer_tick(struct timer_list *unused)
 {
        TRACE_TEXT(" dtlk_timer_tick");
        wake_up_interruptible(&dtlk_process_list);
index 5b8db2ed844d337ad51cc1868d2d34213f03dfe8..7700280717f28803a37a402b36fc48d0c3e08e56 100644 (file)
@@ -122,11 +122,11 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
 /* Last time scheduled */
 static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
 
-static void hangcheck_fire(unsigned long);
+static void hangcheck_fire(struct timer_list *);
 
 static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
 
-static void hangcheck_fire(unsigned long data)
+static void hangcheck_fire(struct timer_list *unused)
 {
        unsigned long long cur_tsc, tsc_diff;
 
index c4ef73c6f45538bc014a98492e2d077cd0c356d5..6edfaa72b98bb76d18ca25e193db2eb5f1d7a427 100644 (file)
@@ -367,9 +367,9 @@ static const struct file_operations bt_bmc_fops = {
        .unlocked_ioctl = bt_bmc_ioctl,
 };
 
-static void poll_timer(unsigned long data)
+static void poll_timer(struct timer_list *t)
 {
-       struct bt_bmc *bt_bmc = (void *)data;
+       struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
 
        bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
        wake_up(&bt_bmc->queue);
@@ -487,8 +487,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
                dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
        } else {
                dev_info(dev, "No IRQ; using timer\n");
-               setup_timer(&bt_bmc->poll_timer, poll_timer,
-                           (unsigned long)bt_bmc);
+               timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
                bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
                add_timer(&bt_bmc->poll_timer);
        }
index 9de189db2cc3c6486884decb5073d4c1e3830707..f45732a2cb3e06d4830fd27b1f926a0b8ef2d0ca 100644 (file)
@@ -4766,7 +4766,7 @@ static struct timer_list ipmi_timer;
 
 static atomic_t stop_operation;
 
-static void ipmi_timeout(unsigned long data)
+static void ipmi_timeout(struct timer_list *unused)
 {
        ipmi_smi_t intf;
        int nt = 0;
@@ -5172,7 +5172,7 @@ static int ipmi_init_msghandler(void)
 
 #endif /* CONFIG_IPMI_PROC_INTERFACE */
 
-       setup_timer(&ipmi_timer, ipmi_timeout, 0);
+       timer_setup(&ipmi_timer, ipmi_timeout, 0);
        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 
        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
index 71d33a1807e46fabd27984e4bcafe993e9ef0f13..779869ed32b1516261e80fffd440b3ca1e1132ea 100644 (file)
@@ -1091,9 +1091,9 @@ static void set_need_watch(void *send_info, bool enable)
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
-static void smi_timeout(unsigned long data)
+static void smi_timeout(struct timer_list *t)
 {
-       struct smi_info   *smi_info = (struct smi_info *) data;
+       struct smi_info   *smi_info = from_timer(smi_info, t, si_timer);
        enum si_sm_result smi_result;
        unsigned long     flags;
        unsigned long     jiffies_now;
@@ -1166,7 +1166,7 @@ static int smi_start_processing(void       *send_info,
        new_smi->intf = intf;
 
        /* Set up the timer that drives the interface. */
-       setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+       timer_setup(&new_smi->si_timer, smi_timeout, 0);
        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
        /* Try to claim any interrupts. */
index 466b3a1c0adfd3fe419ee57338d279d18436eb7e..3cfaec728604d1956c8e8c9e21922e0bee9349c6 100644 (file)
@@ -551,9 +551,9 @@ static void start_get(struct ssif_info *ssif_info)
        }
 }
 
-static void retry_timeout(unsigned long data)
+static void retry_timeout(struct timer_list *t)
 {
-       struct ssif_info *ssif_info = (void *) data;
+       struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
        unsigned long oflags, *flags;
        bool waiting;
 
@@ -1691,8 +1691,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        spin_lock_init(&ssif_info->lock);
        ssif_info->ssif_state = SSIF_NORMAL;
-       setup_timer(&ssif_info->retry_timer, retry_timeout,
-                   (unsigned long)ssif_info);
+       timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
 
        for (i = 0; i < SSIF_NUM_STATS; i++)
                atomic_set(&ssif_info->stats[i], 0);
index 970e1242a282a097405cf30d0c87e7518bca67f4..6aefe5370e5b15c45bda8e156ccdfd1b99e9e3f5 100644 (file)
@@ -343,6 +343,10 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 
+       /* Does it even fit in phys_addr_t? */
+       if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+               return -EINVAL;
+
        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;
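[Editorial aside] The new check in mmap_mem() catches vm_pgoff values whose page-shifted form no longer fits in phys_addr_t: if the left shift truncates, shifting back down cannot reproduce the original pgoff. A small, self-contained illustration of the round-trip idiom, assuming a 32-bit physical address type and a 12-bit page shift purely for demonstration:

/* sketch: detect truncation with a shift round-trip */
typedef unsigned int demo_phys_addr_t;          /* 32-bit stand-in for phys_addr_t */
#define DEMO_PAGE_SHIFT 12

static int demo_pgoff_fits(unsigned long pgoff)
{
        demo_phys_addr_t offset = (demo_phys_addr_t)pgoff << DEMO_PAGE_SHIFT;

        /* e.g. pgoff = 0x100000: the shift overflows 32 bits, so the
         * shift back yields 0 != pgoff and the mapping is rejected */
        return (offset >> DEMO_PAGE_SHIFT) == pgoff;
}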
index 44006ed9558f20690bb71b9f96a0ca382a996d93..a7113b78251a52754aa25d20fc2be7ed1c3abb96 100644 (file)
@@ -23,7 +23,7 @@
 #define __NWBUTTON_C           /* Tell the header file who we are */
 #include "nwbutton.h"
 
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
 
 static int button_press_count;         /* The count of button presses */
 /* Times for the end of a sequence */
@@ -127,7 +127,7 @@ static void button_consume_callbacks (int bpcount)
  * any matching registered function callbacks, initiate reboot, etc.).
  */
 
-static void button_sequence_finished (unsigned long parameters)
+static void button_sequence_finished(struct timer_list *unused)
 {
        if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
            button_press_count == reboot_count)
index abee3ca748019bb8c5c71a9f547868b49584f17d..9dedfd7adc0e7f8b14d410454ae1b10abfbe80cc 100644 (file)
@@ -25,7 +25,7 @@ struct button_callback {
 
 /* Function prototypes: */
 
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
 static irqreturn_t button_handler (int irq, void *dev_id);
 int button_init (void);
 int button_add_callback (void (*callback) (void), int count);
index 616871e68e0901e147686ca6b3028151636fa8f8..5542a438bbd0ba50202917f874abe95c39e09762 100644 (file)
@@ -135,7 +135,7 @@ static struct fasync_struct *rtc_async_queue;
 static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
 
 #ifdef RTC_IRQ
-static void rtc_dropped_irq(unsigned long data);
+static void rtc_dropped_irq(struct timer_list *unused);
 
 static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
 #endif
@@ -1171,7 +1171,7 @@ module_exit(rtc_exit);
  *     for something that requires a steady > 1KHz signal anyways.)
  */
 
-static void rtc_dropped_irq(unsigned long data)
+static void rtc_dropped_irq(struct timer_list *unused)
 {
        unsigned long freq;
 
index 461bf0b8a09473dbadc89b9259473dc36be2cd77..230b99288024994b800c3e9adb42dfc8c0ea4c05 100644 (file)
@@ -22,9 +22,9 @@
 #include "tpm.h"
 #include "tpm-dev.h"
 
-static void user_reader_timeout(unsigned long ptr)
+static void user_reader_timeout(struct timer_list *t)
 {
-       struct file_priv *priv = (struct file_priv *)ptr;
+       struct file_priv *priv = from_timer(priv, t, user_read_timer);
 
        pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
                task_tgid_nr(current));
@@ -48,8 +48,7 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
        priv->chip = chip;
        atomic_set(&priv->data_pending, 0);
        mutex_init(&priv->buffer_mutex);
-       setup_timer(&priv->user_read_timer, user_reader_timeout,
-                       (unsigned long)priv);
+       timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
        INIT_WORK(&priv->work, timeout_work);
 
        file->private_data = priv;
index aadabd9d1e2b68d69d9cd491a5fab7037747515b..cd8d689138ff9917822aec3416a101bd0eb665c6 100644 (file)
 #include <linux/of.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
 
 #include "pmc.h"
 
-#define UTMI_FIXED_MUL         40
+/*
+ * The purpose of this clock is to generate a 480 MHz signal. A different
+ * rate can't be configured.
+ */
+#define UTMI_RATE      480000000
 
 struct clk_utmi {
        struct clk_hw hw;
-       struct regmap *regmap;
+       struct regmap *regmap_pmc;
+       struct regmap *regmap_sfr;
 };
 
 #define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
@@ -37,13 +43,54 @@ static inline bool clk_utmi_ready(struct regmap *regmap)
 
 static int clk_utmi_prepare(struct clk_hw *hw)
 {
+       struct clk_hw *hw_parent;
        struct clk_utmi *utmi = to_clk_utmi(hw);
        unsigned int uckr = AT91_PMC_UPLLEN | AT91_PMC_UPLLCOUNT |
                            AT91_PMC_BIASEN;
+       unsigned int utmi_ref_clk_freq;
+       unsigned long parent_rate;
+
+       /*
+        * If mainck rate is different from 12 MHz, we have to configure the
+        * FREQ field of the SFR_UTMICKTRIM register to generate properly
+        * the utmi clock.
+        */
+       hw_parent = clk_hw_get_parent(hw);
+       parent_rate = clk_hw_get_rate(hw_parent);
+
+       switch (parent_rate) {
+       case 12000000:
+               utmi_ref_clk_freq = 0;
+               break;
+       case 16000000:
+               utmi_ref_clk_freq = 1;
+               break;
+       case 24000000:
+               utmi_ref_clk_freq = 2;
+               break;
+       /*
+        * Not supported on SAMA5D2 but it's not an issue since MAINCK
+        * maximum value is 24 MHz.
+        */
+       case 48000000:
+               utmi_ref_clk_freq = 3;
+               break;
+       default:
+               pr_err("UTMICK: unsupported mainck rate\n");
+               return -EINVAL;
+       }
 
-       regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr);
+       if (utmi->regmap_sfr) {
+               regmap_update_bits(utmi->regmap_sfr, AT91_SFR_UTMICKTRIM,
+                                  AT91_UTMICKTRIM_FREQ, utmi_ref_clk_freq);
+       } else if (utmi_ref_clk_freq) {
+               pr_err("UTMICK: sfr node required\n");
+               return -EINVAL;
+       }
 
-       while (!clk_utmi_ready(utmi->regmap))
+       regmap_update_bits(utmi->regmap_pmc, AT91_CKGR_UCKR, uckr, uckr);
+
+       while (!clk_utmi_ready(utmi->regmap_pmc))
                cpu_relax();
 
        return 0;
@@ -53,21 +100,22 @@ static int clk_utmi_is_prepared(struct clk_hw *hw)
 {
        struct clk_utmi *utmi = to_clk_utmi(hw);
 
-       return clk_utmi_ready(utmi->regmap);
+       return clk_utmi_ready(utmi->regmap_pmc);
 }
 
 static void clk_utmi_unprepare(struct clk_hw *hw)
 {
        struct clk_utmi *utmi = to_clk_utmi(hw);
 
-       regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0);
+       regmap_update_bits(utmi->regmap_pmc, AT91_CKGR_UCKR,
+                          AT91_PMC_UPLLEN, 0);
 }
 
 static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
                                          unsigned long parent_rate)
 {
-       /* UTMI clk is a fixed clk multiplier */
-       return parent_rate * UTMI_FIXED_MUL;
+       /* UTMI clk rate is fixed. */
+       return UTMI_RATE;
 }
 
 static const struct clk_ops utmi_ops = {
@@ -78,7 +126,7 @@ static const struct clk_ops utmi_ops = {
 };
 
 static struct clk_hw * __init
-at91_clk_register_utmi(struct regmap *regmap,
+at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
                       const char *name, const char *parent_name)
 {
        struct clk_utmi *utmi;
@@ -97,7 +145,8 @@ at91_clk_register_utmi(struct regmap *regmap,
        init.flags = CLK_SET_RATE_GATE;
 
        utmi->hw.init = &init;
-       utmi->regmap = regmap;
+       utmi->regmap_pmc = regmap_pmc;
+       utmi->regmap_sfr = regmap_sfr;
 
        hw = &utmi->hw;
        ret = clk_hw_register(NULL, &utmi->hw);
@@ -114,17 +163,35 @@ static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
        struct clk_hw *hw;
        const char *parent_name;
        const char *name = np->name;
-       struct regmap *regmap;
+       struct regmap *regmap_pmc, *regmap_sfr;
 
        parent_name = of_clk_get_parent_name(np, 0);
 
        of_property_read_string(np, "clock-output-names", &name);
 
-       regmap = syscon_node_to_regmap(of_get_parent(np));
-       if (IS_ERR(regmap))
+       regmap_pmc = syscon_node_to_regmap(of_get_parent(np));
+       if (IS_ERR(regmap_pmc))
                return;
 
-       hw = at91_clk_register_utmi(regmap, name, parent_name);
+       /*
+        * If the device supports different mainck rates, this value has to be
+        * set in the UTMI Clock Trimming register.
+        * - 9x5: mainck supports several rates but it is indicated that a
+        *   12 MHz is needed in case of USB.
+        * - sama5d3 and sama5d2: mainck supports several rates. Configuring
+        *   the FREQ field of the UTMI Clock Trimming register is mandatory.
+        * - sama5d4: mainck is at 12 MHz.
+        *
+        * We only need to retrieve sama5d3 or sama5d2 sfr regmap.
+        */
+       regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d3-sfr");
+       if (IS_ERR(regmap_sfr)) {
+               regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+               if (IS_ERR(regmap_sfr))
+                       regmap_sfr = NULL;
+       }
+
+       hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name);
        if (IS_ERR(hw))
                return;
 
index c37a7f0e83aafc881e54f32a82f868ae346f6eb1..281f4322355c135eded2fc5533a54498a0f0f49f 100644 (file)
@@ -579,18 +579,13 @@ static u32 *parent_process(const char *clocks[],
         */
        parent_names = kmalloc_array(parent_count, sizeof(*parent_names),
                               GFP_KERNEL);
-       if (!parent_names) {
-               pr_err("%s: error allocating %u parent names\n", __func__,
-                               parent_count);
+       if (!parent_names)
                return ERR_PTR(-ENOMEM);
-       }
 
        /* There is at least one parent, so allocate a selector array */
        parent_sel = kmalloc_array(parent_count, sizeof(*parent_sel),
                                   GFP_KERNEL);
        if (!parent_sel) {
-               pr_err("%s: error allocating %u parent selectors\n", __func__,
-                               parent_count);
                kfree(parent_names);
 
                return ERR_PTR(-ENOMEM);
index c933be01c7db66f999331b9d187c1125682b127e..0a7e7d5a750605c5be12d953ac912f3fb9105962 100644 (file)
@@ -665,7 +665,7 @@ static int cdce925_probe(struct i2c_client *client,
        init.ops = &cdce925_pll_ops;
        init.flags = 0;
        init.parent_names = &parent_name;
-       init.num_parents = parent_name ? 1 : 0;
+       init.num_parents = 1;
 
        /* Register PLL clocks */
        for (i = 0; i < data->chip_info->num_plls; ++i) {
index 86b245746a6bad5895b267101268124e5050c4b9..151513c655c3c24becbded77921ff3b290630671 100644 (file)
@@ -15,9 +15,7 @@
 #include <linux/clk-provider.h>
 #include <linux/export.h>
 #include <linux/slab.h>
-#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
 #include <linux/err.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
@@ -95,14 +93,12 @@ const struct clk_ops clk_gpio_mux_ops = {
 EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
 
 static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, unsigned gpio,
-               bool active_low, unsigned long flags,
-               const struct clk_ops *clk_gpio_ops)
+               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+               unsigned long flags, const struct clk_ops *clk_gpio_ops)
 {
        struct clk_gpio *clk_gpio;
        struct clk_hw *hw;
        struct clk_init_data init = {};
-       unsigned long gpio_flags;
        int err;
 
        if (dev)
@@ -113,32 +109,13 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
        if (!clk_gpio)
                return ERR_PTR(-ENOMEM);
 
-       if (active_low)
-               gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH;
-       else
-               gpio_flags = GPIOF_OUT_INIT_LOW;
-
-       if (dev)
-               err = devm_gpio_request_one(dev, gpio, gpio_flags, name);
-       else
-               err = gpio_request_one(gpio, gpio_flags, name);
-       if (err) {
-               if (err != -EPROBE_DEFER)
-                       pr_err("%s: %s: Error requesting clock control gpio %u\n",
-                                       __func__, name, gpio);
-               if (!dev)
-                       kfree(clk_gpio);
-
-               return ERR_PTR(err);
-       }
-
        init.name = name;
        init.ops = clk_gpio_ops;
        init.flags = flags | CLK_IS_BASIC;
        init.parent_names = parent_names;
        init.num_parents = num_parents;
 
-       clk_gpio->gpiod = gpio_to_desc(gpio);
+       clk_gpio->gpiod = gpiod;
        clk_gpio->hw.init = &init;
 
        hw = &clk_gpio->hw;
@@ -151,7 +128,6 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
                return hw;
 
        if (!dev) {
-               gpiod_put(clk_gpio->gpiod);
                kfree(clk_gpio);
        }
 
@@ -164,29 +140,27 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
  * @dev: device that is registering this clock
  * @name: name of this clock
  * @parent_name: name of this clock's parent
- * @gpio: gpio number to gate this clock
- * @active_low: true if gpio should be set to 0 to enable clock
+ * @gpiod: gpio descriptor to gate this clock
  * @flags: clock flags
  */
 struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
-               const char *parent_name, unsigned gpio, bool active_low,
+               const char *parent_name, struct gpio_desc *gpiod,
                unsigned long flags)
 {
        return clk_register_gpio(dev, name,
                        (parent_name ? &parent_name : NULL),
-                       (parent_name ? 1 : 0), gpio, active_low, flags,
+                       (parent_name ? 1 : 0), gpiod, flags,
                        &clk_gpio_gate_ops);
 }
 EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
 
 struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
-               const char *parent_name, unsigned gpio, bool active_low,
+               const char *parent_name, struct gpio_desc *gpiod,
                unsigned long flags)
 {
        struct clk_hw *hw;
 
-       hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpio, active_low,
-                                      flags);
+       hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpiod, flags);
        if (IS_ERR(hw))
                return ERR_CAST(hw);
        return hw->clk;
@@ -199,13 +173,12 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
  * @name: name of this clock
  * @parent_names: names of this clock's parents
  * @num_parents: number of parents listed in @parent_names
- * @gpio: gpio number to gate this clock
- * @active_low: true if gpio should be set to 0 to enable clock
+ * @gpiod: gpio descriptor to gate this clock
  * @flags: clock flags
  */
 struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, unsigned gpio,
-               bool active_low, unsigned long flags)
+               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+               unsigned long flags)
 {
        if (num_parents != 2) {
                pr_err("mux-clock %s must have 2 parents\n", name);
@@ -213,18 +186,18 @@ struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
        }
 
        return clk_register_gpio(dev, name, parent_names, num_parents,
-                       gpio, active_low, flags, &clk_gpio_mux_ops);
+                       gpiod, flags, &clk_gpio_mux_ops);
 }
 EXPORT_SYMBOL_GPL(clk_hw_register_gpio_mux);
 
 struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, unsigned gpio,
-               bool active_low, unsigned long flags)
+               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+               unsigned long flags)
 {
        struct clk_hw *hw;
 
        hw = clk_hw_register_gpio_mux(dev, name, parent_names, num_parents,
-                       gpio, active_low, flags);
+                       gpiod, flags);
        if (IS_ERR(hw))
                return ERR_CAST(hw);
        return hw->clk;
@@ -236,10 +209,10 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
        struct device_node *node = pdev->dev.of_node;
        const char **parent_names, *gpio_name;
        unsigned int num_parents;
-       int gpio;
-       enum of_gpio_flags of_flags;
+       struct gpio_desc *gpiod;
        struct clk *clk;
-       bool active_low, is_mux;
+       bool is_mux;
+       int ret;
 
        num_parents = of_clk_get_parent_count(node);
        if (num_parents) {
@@ -255,28 +228,27 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
 
        is_mux = of_device_is_compatible(node, "gpio-mux-clock");
 
-       gpio_name = is_mux ? "select-gpios" : "enable-gpios";
-       gpio = of_get_named_gpio_flags(node, gpio_name, 0, &of_flags);
-       if (gpio < 0) {
-               if (gpio == -EPROBE_DEFER)
+       gpio_name = is_mux ? "select" : "enable";
+       gpiod = devm_gpiod_get(&pdev->dev, gpio_name, GPIOD_OUT_LOW);
+       if (IS_ERR(gpiod)) {
+               ret = PTR_ERR(gpiod);
+               if (ret == -EPROBE_DEFER)
                        pr_debug("%s: %s: GPIOs not yet available, retry later\n",
                                        node->name, __func__);
                else
-                       pr_err("%s: %s: Can't get '%s' DT property\n",
+                       pr_err("%s: %s: Can't get '%s' named GPIO property\n",
                                        node->name, __func__,
                                        gpio_name);
-               return gpio;
+               return ret;
        }
 
-       active_low = of_flags & OF_GPIO_ACTIVE_LOW;
-
        if (is_mux)
                clk = clk_register_gpio_mux(&pdev->dev, node->name,
-                               parent_names, num_parents, gpio, active_low, 0);
+                               parent_names, num_parents, gpiod, 0);
        else
                clk = clk_register_gpio_gate(&pdev->dev, node->name,
-                               parent_names ?  parent_names[0] : NULL, gpio,
-                               active_low, 0);
+                               parent_names ?  parent_names[0] : NULL, gpiod,
+                               0);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
index bbf237173b37bff255b3d00cfc60016c1a9e93f3..c4ee280f454d9213b2bd1a82ac576e7fd790fb42 100644 (file)
@@ -139,7 +139,7 @@ static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
        val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
        val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
 
-       dev_dbg(clk->dev, "write configurarion: %#x\n", val);
+       dev_dbg(clk->dev, "write configuration: %#x\n", val);
 
        hsdk_pll_write(clk, CGU_PLL_CTRL, val);
 }
@@ -169,7 +169,7 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
 
        val = hsdk_pll_read(clk, CGU_PLL_CTRL);
 
-       dev_dbg(clk->dev, "current configurarion: %#x\n", val);
+       dev_dbg(clk->dev, "current configuration: %#x\n", val);
 
        /* Check if PLL is disabled */
        if (val & CGU_PLL_CTRL_PD)
index 16a3d5717f4edc371da411168276371f0a656cac..39cabe157163b9b0dd3498c65a242fe1614253ee 100644 (file)
@@ -134,11 +134,9 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
        }
 
        /* allocate the mux */
-       mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
-       if (!mux) {
-               pr_err("%s: could not allocate mux clk\n", __func__);
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        if (clk_mux_flags & CLK_MUX_READ_ONLY)
index a94c3f56c590967743fade8e4912ab8f8a4cd2b2..61c3e40507d31846db8ea23dd745cd47a3e01872 100644 (file)
@@ -384,7 +384,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
        mux_ops = div_ops = gate_ops = NULL;
        mux_hw = div_hw = gate_hw = NULL;
 
-       if (gcfg->mux && gcfg->mux) {
+       if (gcfg->mux && cfg->mux) {
                mux = _get_cmux(base + cfg->mux->offset,
                                cfg->mux->shift,
                                cfg->mux->width,
@@ -410,7 +410,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
                }
        }
 
-       if (gcfg->gate && gcfg->gate) {
+       if (gcfg->gate && cfg->gate) {
                gate = _get_cgate(base + cfg->gate->offset,
                                cfg->gate->bit_idx,
                                gcfg->gate->flags, lock);
index 7b222a5db9319b46efe373e6fb859b412457a510..25dfe050ae9f8d3dae9cb393a94590dfad52c7dd 100644 (file)
@@ -82,7 +82,7 @@ static const struct clk_ops twl6040_pdmclk_ops = {
        .recalc_rate = twl6040_pdmclk_recalc_rate,
 };
 
-static struct clk_init_data twl6040_pdmclk_init = {
+static const struct clk_init_data twl6040_pdmclk_init = {
        .name = "pdmclk",
        .ops = &twl6040_pdmclk_ops,
        .flags = CLK_GET_RATE_NOCACHE,
index ec8aafda6e243216b523f6630cf1a148f254c49a..7b3e1921771fa236d19401094eccbc4b63d0f231 100644 (file)
 #define U300_SYSCON_S0CCR_CLOCK_FREQ_MASK                      (0x01E0)
 #define U300_SYSCON_S0CCR_CLOCK_SELECT_MASK                    (0x001E)
 #define U300_SYSCON_S0CCR_CLOCK_ENABLE                         (0x0001)
-#define U300_SYSCON_S0CCR_SEL_MCLK                             (0x8<<1)
-#define U300_SYSCON_S0CCR_SEL_ACC_FSM_CLK                      (0xA<<1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_48_CLK                     (0xC<<1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_60_CLK                     (0xD<<1)
-#define U300_SYSCON_S0CCR_SEL_ACC_PLL208_CLK                   (0xE<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL13_CLK                    (0x0<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_FSM_CLK                      (0x2<<1)
-#define U300_SYSCON_S0CCR_SEL_RTC_CLK                          (0x4<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL208_CLK                   (0x6<<1)
+#define U300_SYSCON_S0CCR_SEL_MCLK                             (0x8 << 1)
+#define U300_SYSCON_S0CCR_SEL_ACC_FSM_CLK                      (0xA << 1)
+#define U300_SYSCON_S0CCR_SEL_PLL60_48_CLK                     (0xC << 1)
+#define U300_SYSCON_S0CCR_SEL_PLL60_60_CLK                     (0xD << 1)
+#define U300_SYSCON_S0CCR_SEL_ACC_PLL208_CLK                   (0xE << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_PLL13_CLK                    (0x0 << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_FSM_CLK                      (0x2 << 1)
+#define U300_SYSCON_S0CCR_SEL_RTC_CLK                          (0x4 << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_PLL208_CLK                   (0x6 << 1)
 /* SYS_1_CLK_CONTROL second clock control 16 bit (R/W) */
 #define U300_SYSCON_S1CCR                                      (0x124)
 #define U300_SYSCON_S1CCR_FIELD_MASK                           (0x43FF)
 #define U300_SYSCON_S1CCR_CLOCK_FREQ_MASK                      (0x01E0)
 #define U300_SYSCON_S1CCR_CLOCK_SELECT_MASK                    (0x001E)
 #define U300_SYSCON_S1CCR_CLOCK_ENABLE                         (0x0001)
-#define U300_SYSCON_S1CCR_SEL_MCLK                             (0x8<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_FSM_CLK                      (0xA<<1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_48_CLK                     (0xC<<1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_60_CLK                     (0xD<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL208_CLK                   (0xE<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL13_CLK                    (0x0<<1)
-#define U300_SYSCON_S1CCR_SEL_APP_FSM_CLK                      (0x2<<1)
-#define U300_SYSCON_S1CCR_SEL_RTC_CLK                          (0x4<<1)
-#define U300_SYSCON_S1CCR_SEL_APP_PLL208_CLK                   (0x6<<1)
-/* SYS_2_CLK_CONTROL third clock contol 16 bit (R/W) */
+#define U300_SYSCON_S1CCR_SEL_MCLK                             (0x8 << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_FSM_CLK                      (0xA << 1)
+#define U300_SYSCON_S1CCR_SEL_PLL60_48_CLK                     (0xC << 1)
+#define U300_SYSCON_S1CCR_SEL_PLL60_60_CLK                     (0xD << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_PLL208_CLK                   (0xE << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_PLL13_CLK                    (0x0 << 1)
+#define U300_SYSCON_S1CCR_SEL_APP_FSM_CLK                      (0x2 << 1)
+#define U300_SYSCON_S1CCR_SEL_RTC_CLK                          (0x4 << 1)
+#define U300_SYSCON_S1CCR_SEL_APP_PLL208_CLK                   (0x6 << 1)
+/* SYS_2_CLK_CONTROL third clock control 16 bit (R/W) */
 #define U300_SYSCON_S2CCR                                      (0x128)
 #define U300_SYSCON_S2CCR_FIELD_MASK                           (0xC3FF)
 #define U300_SYSCON_S2CCR_CLK_STEAL                            (0x8000)
 #define U300_SYSCON_S2CCR_CLOCK_FREQ_MASK                      (0x01E0)
 #define U300_SYSCON_S2CCR_CLOCK_SELECT_MASK                    (0x001E)
 #define U300_SYSCON_S2CCR_CLOCK_ENABLE                         (0x0001)
-#define U300_SYSCON_S2CCR_SEL_MCLK                             (0x8<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_FSM_CLK                      (0xA<<1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_48_CLK                     (0xC<<1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_60_CLK                     (0xD<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL208_CLK                   (0xE<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL13_CLK                    (0x0<<1)
-#define U300_SYSCON_S2CCR_SEL_APP_FSM_CLK                      (0x2<<1)
-#define U300_SYSCON_S2CCR_SEL_RTC_CLK                          (0x4<<1)
-#define U300_SYSCON_S2CCR_SEL_APP_PLL208_CLK                   (0x6<<1)
+#define U300_SYSCON_S2CCR_SEL_MCLK                             (0x8 << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_FSM_CLK                      (0xA << 1)
+#define U300_SYSCON_S2CCR_SEL_PLL60_48_CLK                     (0xC << 1)
+#define U300_SYSCON_S2CCR_SEL_PLL60_60_CLK                     (0xD << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_PLL208_CLK                   (0xE << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_PLL13_CLK                    (0x0 << 1)
+#define U300_SYSCON_S2CCR_SEL_APP_FSM_CLK                      (0x2 << 1)
+#define U300_SYSCON_S2CCR_SEL_RTC_CLK                          (0x4 << 1)
+#define U300_SYSCON_S2CCR_SEL_APP_PLL208_CLK                   (0x6 << 1)
 /* SC_PLL_IRQ_CONTROL 16bit (R/W) */
 #define U300_SYSCON_PICR                                       (0x0130)
 #define U300_SYSCON_PICR_MASK                                  (0x00FF)
  *  +- ISP Image Signal Processor (U335 only)
  *  +- CDS (U335 only)
  *  +- DMA Direct Memory Access Controller
- *  +- AAIF APP/ACC Inteface (Mobile Scalable Link, MSL)
+ *  +- AAIF APP/ACC Interface (Mobile Scalable Link, MSL)
  *  +- APEX
  *  +- VIDEO_ENC AVE2/3 Video Encoder
  *  +- XGAM Graphics Accelerator Controller
@@ -568,14 +568,14 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
        struct clk_syscon *sclk = to_syscon(hw);
        u16 perf = syscon_get_perf();
 
-       switch(sclk->clk_val) {
+       switch (sclk->clk_val) {
        case U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN:
        case U300_SYSCON_SBCER_I2C0_CLK_EN:
        case U300_SYSCON_SBCER_I2C1_CLK_EN:
        case U300_SYSCON_SBCER_MMC_CLK_EN:
        case U300_SYSCON_SBCER_SPI_CLK_EN:
                /* The FAST clocks have one progression */
-               switch(perf) {
+               switch (perf) {
                case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
                case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
                        return 13000000;
@@ -586,7 +586,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
        case U300_SYSCON_SBCER_NANDIF_CLK_EN:
        case U300_SYSCON_SBCER_XGAM_CLK_EN:
                /* AMBA interconnect peripherals */
-               switch(perf) {
+               switch (perf) {
                case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
                case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
                        return 6500000;
@@ -598,7 +598,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
        case U300_SYSCON_SBCER_SEMI_CLK_EN:
        case U300_SYSCON_SBCER_EMIF_CLK_EN:
                /* EMIF speeds */
-               switch(perf) {
+               switch (perf) {
                case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
                case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
                        return 13000000;
@@ -609,7 +609,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
                }
        case U300_SYSCON_SBCER_CPU_CLK_EN:
                /* And the fast CPU clock */
-               switch(perf) {
+               switch (perf) {
                case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
                case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
                        return 13000000;
@@ -702,12 +702,10 @@ syscon_clk_register(struct device *dev, const char *name,
        struct clk_init_data init;
        int ret;
 
-       sclk = kzalloc(sizeof(struct clk_syscon), GFP_KERNEL);
-       if (!sclk) {
-               pr_err("could not allocate syscon clock %s\n",
-                       name);
+       sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
+       if (!sclk)
                return ERR_PTR(-ENOMEM);
-       }
+
        init.name = name;
        init.ops = &syscon_clk_ops;
        init.flags = flags;
@@ -1123,12 +1121,10 @@ mclk_clk_register(struct device *dev, const char *name,
        struct clk_init_data init;
        int ret;
 
-       mclk = kzalloc(sizeof(struct clk_mclk), GFP_KERNEL);
-       if (!mclk) {
-               pr_err("could not allocate MMC/SD clock %s\n",
-                      name);
+       mclk = kzalloc(sizeof(*mclk), GFP_KERNEL);
+       if (!mclk)
                return ERR_PTR(-ENOMEM);
-       }
+
        init.name = "mclk";
        init.ops = &mclk_ops;
        init.flags = 0;
index a47960aacfa52c30749787ea3cb1a3d35b0576d0..146769532325cafa3e922ebf4681318d82797736 100644 (file)
@@ -52,7 +52,7 @@ static const struct clk_ops wm831x_xtal_ops = {
        .recalc_rate = wm831x_xtal_recalc_rate,
 };
 
-static struct clk_init_data wm831x_xtal_init = {
+static const struct clk_init_data wm831x_xtal_init = {
        .name = "xtal",
        .ops = &wm831x_xtal_ops,
 };
@@ -225,7 +225,7 @@ static const struct clk_ops wm831x_fll_ops = {
        .get_parent = wm831x_fll_get_parent,
 };
 
-static struct clk_init_data wm831x_fll_init = {
+static const struct clk_init_data wm831x_fll_init = {
        .name = "fll",
        .ops = &wm831x_fll_ops,
        .parent_names = wm831x_fll_parents,
@@ -338,7 +338,7 @@ static const struct clk_ops wm831x_clkout_ops = {
        .set_parent = wm831x_clkout_set_parent,
 };
 
-static struct clk_init_data wm831x_clkout_init = {
+static const struct clk_init_data wm831x_clkout_init = {
        .name = "clkout",
        .ops = &wm831x_clkout_ops,
        .parent_names = wm831x_clkout_parents,
index 4c75821a3933c26cd1c0582ce7ae13b145836f40..531b030d4d4e7392cd827b9c9f40f5293d59b1f2 100644 (file)
@@ -146,10 +146,8 @@ static struct clk *xgene_register_clk_pll(struct device *dev,
 
        /* allocate the APM clock structure */
        apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
-       if (!apmclk) {
-               pr_err("%s: could not allocate APM clk\n", __func__);
+       if (!apmclk)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &xgene_clk_pll_ops;
@@ -191,7 +189,7 @@ static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_ty
        int version = xgene_pllclk_version(np);
 
        reg = of_iomap(np, 0);
-       if (reg == NULL) {
+       if (!reg) {
                pr_err("Unable to map CSR register for %pOF\n", np);
                return;
        }
@@ -467,7 +465,7 @@ static int xgene_clk_enable(struct clk_hw *hw)
        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);
 
-       if (pclk->param.csr_reg != NULL) {
+       if (pclk->param.csr_reg) {
                pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
                /* First enable the clock */
                data = xgene_clk_read(pclk->param.csr_reg +
@@ -507,7 +505,7 @@ static void xgene_clk_disable(struct clk_hw *hw)
        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);
 
-       if (pclk->param.csr_reg != NULL) {
+       if (pclk->param.csr_reg) {
                pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
                /* First put the CSR in reset */
                data = xgene_clk_read(pclk->param.csr_reg +
@@ -533,7 +531,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
        struct xgene_clk *pclk = to_xgene_clk(hw);
        u32 data = 0;
 
-       if (pclk->param.csr_reg != NULL) {
+       if (pclk->param.csr_reg) {
                pr_debug("%s clock checking\n", clk_hw_get_name(hw));
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
@@ -542,7 +540,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
                                                        "disabled");
        }
 
-       if (pclk->param.csr_reg == NULL)
+       if (!pclk->param.csr_reg)
                return 1;
        return data & pclk->param.reg_clk_mask ? 1 : 0;
 }
@@ -650,10 +648,8 @@ static struct clk *xgene_register_clk(struct device *dev,
 
        /* allocate the APM clock structure */
        apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
-       if (!apmclk) {
-               pr_err("%s: could not allocate APM clk\n", __func__);
+       if (!apmclk)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &xgene_clk_ops;
@@ -709,7 +705,7 @@ static void __init xgene_devclk_init(struct device_node *np)
                        break;
                }
                map_res = of_iomap(np, i);
-               if (map_res == NULL) {
+               if (!map_res) {
                        pr_err("Unable to map resource %d for %pOF\n", i, np);
                        goto err;
                }
index c8d83acda0061977218d1fd084bc6faddff4091c..647d056df88c8dd2a7d8288e35fa2eeba9b7705b 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
 
@@ -46,6 +47,7 @@ struct clk_core {
        const struct clk_ops    *ops;
        struct clk_hw           *hw;
        struct module           *owner;
+       struct device           *dev;
        struct clk_core         *parent;
        const char              **parent_names;
        struct clk_core         **parents;
@@ -87,6 +89,26 @@ struct clk {
        struct hlist_node clks_node;
 };
 
+/***           runtime pm          ***/
+static int clk_pm_runtime_get(struct clk_core *core)
+{
+       int ret = 0;
+
+       if (!core->dev)
+               return 0;
+
+       ret = pm_runtime_get_sync(core->dev);
+       return ret < 0 ? ret : 0;
+}
+
+static void clk_pm_runtime_put(struct clk_core *core)
+{
+       if (!core->dev)
+               return;
+
+       pm_runtime_put_sync(core->dev);
+}
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
@@ -150,6 +172,8 @@ static void clk_enable_unlock(unsigned long flags)
 
 static bool clk_core_is_prepared(struct clk_core *core)
 {
+       bool ret = false;
+
        /*
         * .is_prepared is optional for clocks that can prepare
         * fall back to software usage counter if it is missing
@@ -157,11 +181,18 @@ static bool clk_core_is_prepared(struct clk_core *core)
        if (!core->ops->is_prepared)
                return core->prepare_count;
 
-       return core->ops->is_prepared(core->hw);
+       if (!clk_pm_runtime_get(core)) {
+               ret = core->ops->is_prepared(core->hw);
+               clk_pm_runtime_put(core);
+       }
+
+       return ret;
 }
 
 static bool clk_core_is_enabled(struct clk_core *core)
 {
+       bool ret = false;
+
        /*
         * .is_enabled is only mandatory for clocks that gate
         * fall back to software usage counter if .is_enabled is missing
@@ -169,7 +200,29 @@ static bool clk_core_is_enabled(struct clk_core *core)
        if (!core->ops->is_enabled)
                return core->enable_count;
 
-       return core->ops->is_enabled(core->hw);
+       /*
+        * Check if the clock controller's device is runtime active before
+        * calling the .is_enabled callback. If not, assume that the clock is
+        * disabled, because we might be called from atomic context, from
+        * which pm_runtime_get() is not allowed.
+        * This function is called mainly from clk_disable_unused_subtree,
+        * which ensures proper runtime pm activation of the controller before
+        * taking the enable spinlock, but the check below is needed if one
+        * tries to call it from other places.
+        */
+       if (core->dev) {
+               pm_runtime_get_noresume(core->dev);
+               if (!pm_runtime_active(core->dev)) {
+                       ret = false;
+                       goto done;
+               }
+       }
+
+       ret = core->ops->is_enabled(core->hw);
+done:
+       clk_pm_runtime_put(core);
+
+       return ret;
 }
 
 /***    helper functions   ***/
@@ -489,6 +542,8 @@ static void clk_core_unprepare(struct clk_core *core)
        if (core->ops->unprepare)
                core->ops->unprepare(core->hw);
 
+       clk_pm_runtime_put(core);
+
        trace_clk_unprepare_complete(core);
        clk_core_unprepare(core->parent);
 }
@@ -530,10 +585,14 @@ static int clk_core_prepare(struct clk_core *core)
                return 0;
 
        if (core->prepare_count == 0) {
-               ret = clk_core_prepare(core->parent);
+               ret = clk_pm_runtime_get(core);
                if (ret)
                        return ret;
 
+               ret = clk_core_prepare(core->parent);
+               if (ret)
+                       goto runtime_put;
+
                trace_clk_prepare(core);
 
                if (core->ops->prepare)
@@ -541,15 +600,18 @@ static int clk_core_prepare(struct clk_core *core)
 
                trace_clk_prepare_complete(core);
 
-               if (ret) {
-                       clk_core_unprepare(core->parent);
-                       return ret;
-               }
+               if (ret)
+                       goto unprepare;
        }
 
        core->prepare_count++;
 
        return 0;
+unprepare:
+       clk_core_unprepare(core->parent);
+runtime_put:
+       clk_pm_runtime_put(core);
+       return ret;
 }
 
 static int clk_core_prepare_lock(struct clk_core *core)
@@ -745,6 +807,9 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
        if (core->flags & CLK_IGNORE_UNUSED)
                return;
 
+       if (clk_pm_runtime_get(core))
+               return;
+
        if (clk_core_is_prepared(core)) {
                trace_clk_unprepare(core);
                if (core->ops->unprepare_unused)
@@ -753,6 +818,8 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
                        core->ops->unprepare(core->hw);
                trace_clk_unprepare_complete(core);
        }
+
+       clk_pm_runtime_put(core);
 }
 
 static void clk_disable_unused_subtree(struct clk_core *core)
@@ -768,6 +835,9 @@ static void clk_disable_unused_subtree(struct clk_core *core)
        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_prepare_enable(core->parent);
 
+       if (clk_pm_runtime_get(core))
+               goto unprepare_out;
+
        flags = clk_enable_lock();
 
        if (core->enable_count)
@@ -792,6 +862,8 @@ static void clk_disable_unused_subtree(struct clk_core *core)
 
 unlock_out:
        clk_enable_unlock(flags);
+       clk_pm_runtime_put(core);
+unprepare_out:
        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_disable_unprepare(core->parent);
 }
@@ -1038,9 +1110,13 @@ EXPORT_SYMBOL_GPL(clk_get_accuracy);
 static unsigned long clk_recalc(struct clk_core *core,
                                unsigned long parent_rate)
 {
-       if (core->ops->recalc_rate)
-               return core->ops->recalc_rate(core->hw, parent_rate);
-       return parent_rate;
+       unsigned long rate = parent_rate;
+
+       if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
+               rate = core->ops->recalc_rate(core->hw, parent_rate);
+               clk_pm_runtime_put(core);
+       }
+       return rate;
 }
 
 /**
@@ -1565,6 +1641,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 {
        struct clk_core *top, *fail_clk;
        unsigned long rate = req_rate;
+       int ret = 0;
 
        if (!core)
                return 0;
@@ -1581,21 +1658,28 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
        if (!top)
                return -EINVAL;
 
+       ret = clk_pm_runtime_get(core);
+       if (ret)
+               return ret;
+
        /* notify that we are about to change rates */
        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
        if (fail_clk) {
                pr_debug("%s: failed to set %s rate\n", __func__,
                                fail_clk->name);
                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto err;
        }
 
        /* change the rates */
        clk_change_rate(top);
 
        core->req_rate = req_rate;
+err:
+       clk_pm_runtime_put(core);
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -1826,12 +1910,16 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
                p_rate = parent->rate;
        }
 
+       ret = clk_pm_runtime_get(core);
+       if (ret)
+               goto out;
+
        /* propagate PRE_RATE_CHANGE notifications */
        ret = __clk_speculate_rates(core, p_rate);
 
        /* abort if a driver objects */
        if (ret & NOTIFY_STOP_MASK)
-               goto out;
+               goto runtime_put;
 
        /* do the re-parent */
        ret = __clk_set_parent(core, parent, p_index);
@@ -1844,6 +1932,8 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
                __clk_recalc_accuracies(core);
        }
 
+runtime_put:
+       clk_pm_runtime_put(core);
 out:
        clk_prepare_unlock();
 
@@ -2350,7 +2440,7 @@ static inline void clk_debug_unregister(struct clk_core *core)
  */
 static int __clk_core_init(struct clk_core *core)
 {
-       int i, ret = 0;
+       int i, ret;
        struct clk_core *orphan;
        struct hlist_node *tmp2;
        unsigned long rate;
@@ -2360,6 +2450,10 @@ static int __clk_core_init(struct clk_core *core)
 
        clk_prepare_lock();
 
+       ret = clk_pm_runtime_get(core);
+       if (ret)
+               goto unlock;
+
        /* check to see if a clock with this name is already registered */
        if (clk_core_lookup(core->name)) {
                pr_debug("%s: clk %s already initialized\n",
@@ -2512,6 +2606,8 @@ static int __clk_core_init(struct clk_core *core)
 
        kref_init(&core->ref);
 out:
+       clk_pm_runtime_put(core);
+unlock:
        clk_prepare_unlock();
 
        if (!ret)
@@ -2583,6 +2679,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
                goto fail_name;
        }
        core->ops = hw->init->ops;
+       if (dev && pm_runtime_enabled(dev))
+               core->dev = dev;
        if (dev && dev->driver)
                core->owner = dev->driver->owner;
        core->hw = hw;
@@ -3177,6 +3275,37 @@ int of_clk_add_hw_provider(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
 
+static void devm_of_clk_release_provider(struct device *dev, void *res)
+{
+       of_clk_del_provider(*(struct device_node **)res);
+}
+
+int devm_of_clk_add_hw_provider(struct device *dev,
+                       struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+                                             void *data),
+                       void *data)
+{
+       struct device_node **ptr, *np;
+       int ret;
+
+       ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
+                          GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       np = dev->of_node;
+       ret = of_clk_add_hw_provider(np, get, data);
+       if (!ret) {
+               *ptr = np;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
+
 /**
  * of_clk_del_provider() - Remove a previously registered clock provider
  * @np: Device node pointer associated with clock provider
@@ -3198,6 +3327,27 @@ void of_clk_del_provider(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_del_provider);
 
+static int devm_clk_provider_match(struct device *dev, void *res, void *data)
+{
+       struct device_node **np = res;
+
+       if (WARN_ON(!np || !*np))
+               return 0;
+
+       return *np == data;
+}
+
+void devm_of_clk_del_provider(struct device *dev)
+{
+       int ret;
+
+       ret = devres_release(dev, devm_of_clk_release_provider,
+                            devm_clk_provider_match, dev->of_node);
+
+       WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_of_clk_del_provider);
+
 static struct clk_hw *
 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
                              struct of_phandle_args *clkspec)
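
Taken together, the clk.c hunks above make the framework runtime-PM aware (clk_register() records core->dev only when runtime PM is already enabled on the provider's device, and the prepare/rate/parent paths bracket their callbacks with clk_pm_runtime_get()/clk_pm_runtime_put()) and add a devres-managed variant of of_clk_add_hw_provider(). A hypothetical provider probe showing how the two pieces fit; all foo_* names and the use of of_clk_hw_onecell_get() are assumptions made for this sketch, not part of the patch:

static int foo_clk_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Enable runtime PM before registering clocks so that clk_register()
	 * sees pm_runtime_enabled(dev) and stores core->dev. */
	pm_runtime_enable(dev);

	ret = foo_register_clk_hws(dev);	/* hypothetical clk_hw_register() calls */
	if (ret)
		goto err_pm;

	/* The provider is removed automatically on driver unbind. */
	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					  &foo_clk_hw_data);
	if (ret)
		goto err_pm;

	return 0;

err_pm:
	pm_runtime_disable(dev);
	return ret;
}
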
index fa0fba653898b462e6615549b1bd2e94553fffea..77072c7778b9ca5bae9c01592f4ef781f68b6f35 100644 (file)
@@ -415,7 +415,7 @@ static int mmc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
        return mmc_clk_set_timing(hw, rate);
 }
 
-static struct clk_ops clk_mmc_ops = {
+static const struct clk_ops clk_mmc_ops = {
        .prepare = mmc_clk_prepare,
        .determine_rate = mmc_clk_determine_rate,
        .set_rate = mmc_clk_set_rate,
index a18258eb89cb1b1767a5335b7bf8938e66fafab2..f404199596563e34f7b62eead4fc539f084ca79b 100644 (file)
@@ -34,7 +34,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = {
 
 /* crgctrl */
 static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = {
-       { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, },
+       { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, },
        { HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, },
        { HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, },
        { HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, },
index e786d717f75dcf51383627988e22f5956fe74d6c..a87809d4bd525aad9ebffc7a9ef0f9c5a33752d8 100644 (file)
@@ -145,7 +145,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
        { HI6220_BBPPLL_SEL,    "bbppll_sel",    "pll0_bbp_gate",  CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9,  0, },
        { HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
        { HI6220_MMC2_SEL,      "mmc2_sel",      "mmc2_mux1",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
-       { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll",         CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
+       { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll",         CLK_SET_RATE_PARENT|CLK_IS_CRITICAL,   0x270, 12, 0, },
 };
 
 static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
index 14b05efa3c2ae4fca3b86e2afe270801074d066d..9584f0c32dda985528b65baa83ef646ff9654b81 100644 (file)
@@ -208,7 +208,7 @@ static void clk_ether_unprepare(struct clk_hw *hw)
        writel_relaxed(val, clk->ctrl_reg);
 }
 
-static struct clk_ops clk_ether_ops = {
+static const struct clk_ops clk_ether_ops = {
        .prepare = clk_ether_prepare,
        .unprepare = clk_ether_unprepare,
 };
@@ -247,7 +247,7 @@ static void clk_complex_disable(struct clk_hw *hw)
        writel_relaxed(val, clk->phy_reg);
 }
 
-static struct clk_ops clk_complex_ops = {
+static const struct clk_ops clk_complex_ops = {
        .enable = clk_complex_enable,
        .disable = clk_complex_disable,
 };
index 7908bc3c9ec73c0bbc498197d774bef5075797cf..f36bdef9183178f21f1a33327b4e8b586f611345 100644 (file)
@@ -88,7 +88,7 @@ static int clkgate_separated_is_enabled(struct clk_hw *hw)
        return reg ? 1 : 0;
 }
 
-static struct clk_ops clkgate_separated_ops = {
+static const struct clk_ops clkgate_separated_ops = {
        .enable         = clkgate_separated_enable,
        .disable        = clkgate_separated_disable,
        .is_enabled     = clkgate_separated_is_enabled,
@@ -105,10 +105,8 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
        struct clk_init_data init;
 
        sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
-       if (!sclk) {
-               pr_err("%s: fail to allocate separated gated clk\n", __func__);
+       if (!sclk)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &clkgate_separated_ops;
index ed8bb5f7507f2c22aa750367721dbfad8ed5355c..8478948e858e396b3797d99d926c0dfe754cd8fa 100644 (file)
@@ -47,6 +47,8 @@
 #define HI3798CV200_FIXED_12M  81
 #define HI3798CV200_FIXED_48M  82
 #define HI3798CV200_FIXED_60M  83
+#define HI3798CV200_FIXED_166P5M       84
+#define HI3798CV200_SDIO0_MUX  85
 
 #define HI3798CV200_CRG_NR_CLKS                128
 
@@ -63,6 +65,7 @@ static const struct hisi_fixed_rate_clock hi3798cv200_fixed_rate_clks[] = {
        { HI3798CV200_FIXED_75M, "75m", NULL, 0, 75000000, },
        { HI3798CV200_FIXED_100M, "100m", NULL, 0, 100000000, },
        { HI3798CV200_FIXED_150M, "150m", NULL, 0, 150000000, },
+       { HI3798CV200_FIXED_166P5M, "166p5m", NULL, 0, 165000000, },
        { HI3798CV200_FIXED_200M, "200m", NULL, 0, 200000000, },
        { HI3798CV200_FIXED_250M, "250m", NULL, 0, 250000000, },
 };
@@ -75,12 +78,19 @@ static const char *const comphy1_mux_p[] = {
                "100m", "25m"};
 static u32 comphy1_mux_table[] = {2, 3};
 
+static const char *const sdio_mux_p[] = {
+               "100m", "50m", "150m", "166p5m" };
+static u32 sdio_mux_table[] = {0, 1, 2, 3};
+
 static struct hisi_mux_clock hi3798cv200_mux_clks[] = {
        { HI3798CV200_MMC_MUX, "mmc_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
                CLK_SET_RATE_PARENT, 0xa0, 8, 3, 0, mmc_mux_table, },
        { HI3798CV200_COMBPHY1_MUX, "combphy1_mux",
                comphy1_mux_p, ARRAY_SIZE(comphy1_mux_p),
                CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy1_mux_table, },
+       { HI3798CV200_SDIO0_MUX, "sdio0_mux", sdio_mux_p,
+               ARRAY_SIZE(sdio_mux_p), CLK_SET_RATE_PARENT,
+               0x9c, 8, 2, 0, sdio_mux_table, },
 };
 
 static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
@@ -104,7 +114,7 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
        /* SDIO */
        { HISTB_SDIO0_BIU_CLK, "clk_sdio0_biu", "200m",
                        CLK_SET_RATE_PARENT, 0x9c, 0, 0, },
-       { HISTB_SDIO0_CIU_CLK, "clk_sdio0_ciu", "mmc_mux",
+       { HISTB_SDIO0_CIU_CLK, "clk_sdio0_ciu", "sdio0_mux",
                CLK_SET_RATE_PARENT, 0x9c, 1, 0, },
        /* EMMC */
        { HISTB_MMC_BIU_CLK, "clk_mmc_biu", "200m",
index 5cc99590f9a33a397bc1fc51ffaf821867aeb6e9..6df3389687bc0fe9e4afae264e3c825ea11bb35e 100644 (file)
@@ -72,7 +72,7 @@ static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
        return ret;
 }
 
-static struct clk_ops clk_busy_divider_ops = {
+static const struct clk_ops clk_busy_divider_ops = {
        .recalc_rate = clk_busy_divider_recalc_rate,
        .round_rate = clk_busy_divider_round_rate,
        .set_rate = clk_busy_divider_set_rate,
@@ -147,7 +147,7 @@ static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
        return ret;
 }
 
-static struct clk_ops clk_busy_mux_ops = {
+static const struct clk_ops clk_busy_mux_ops = {
        .get_parent = clk_busy_mux_get_parent,
        .set_parent = clk_busy_mux_set_parent,
 };
index db44a198a0d9999f3e998033a3f8a517035c5e74..60fc9d7a9723959e4b13185d87405a1de8e455d3 100644 (file)
@@ -118,7 +118,7 @@ static void clk_gate2_disable_unused(struct clk_hw *hw)
        spin_unlock_irqrestore(gate->lock, flags);
 }
 
-static struct clk_ops clk_gate2_ops = {
+static const struct clk_ops clk_gate2_ops = {
        .enable = clk_gate2_enable,
        .disable = clk_gate2_disable,
        .disable_unused = clk_gate2_disable_unused,
index c07df719b8a35d16ed88014dcaca37c6d957d158..8d518ad5dc13e5a5c20436950ccf5f45837a5333 100644 (file)
@@ -761,7 +761,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
        clk[IMX6QDL_CLK_GPU3D_CORE]   = imx_clk_gate2("gpu3d_core",    "gpu3d_core_podf",   base + 0x6c, 26);
        clk[IMX6QDL_CLK_HDMI_IAHB]    = imx_clk_gate2("hdmi_iahb",     "ahb",               base + 0x70, 0);
-       clk[IMX6QDL_CLK_HDMI_ISFR]    = imx_clk_gate2("hdmi_isfr",     "video_27m",         base + 0x70, 4);
+       clk[IMX6QDL_CLK_HDMI_ISFR]    = imx_clk_gate2("hdmi_isfr",     "mipi_core_cfg",     base + 0x70, 4);
        clk[IMX6QDL_CLK_I2C1]         = imx_clk_gate2("i2c1",          "ipg_per",           base + 0x70, 6);
        clk[IMX6QDL_CLK_I2C2]         = imx_clk_gate2("i2c2",          "ipg_per",           base + 0x70, 8);
        clk[IMX6QDL_CLK_I2C3]         = imx_clk_gate2("i2c3",          "ipg_per",           base + 0x70, 10);
index 5e8c18afce9ad35742dd378cded8042abe80c922..85c1181644697153c7b52ddd417bbe7630c8b5b1 100644 (file)
@@ -267,7 +267,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
                clks[IMX6ULL_CLK_EPDC_SEL]        = imx_clk_mux("epdc_sel",     base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels));
        }
        clks[IMX6UL_CLK_ECSPI_SEL]        = imx_clk_mux("ecspi_sel",    base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
-       clks[IMX6UL_CLK_LCDIF_PRE_SEL]    = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
+       clks[IMX6UL_CLK_LCDIF_PRE_SEL]    = imx_clk_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT);
        clks[IMX6UL_CLK_LCDIF_SEL]        = imx_clk_mux("lcdif_sel",    base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
 
        clks[IMX6UL_CLK_LDB_DI0_DIV_SEL]  = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
index 2305699db46798441c7c32ef8910b5fe3354c64a..80dc211eb74bbebe465c40d7bf30eb336d43af89 100644 (file)
@@ -54,11 +54,6 @@ static const char *arm_m4_sel[] = { "osc", "pll_sys_main_240m_clk",
        "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
        "pll_usb_main_clk", };
 
-static const char *arm_m0_sel[] = { "osc", "pll_sys_main_120m_clk",
-       "pll_enet_125m_clk", "pll_sys_pfd2_135m_clk",
-       "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
-       "pll_usb_main_clk", };
-
 static const char *axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
        "pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd5_clk",
        "pll_audio_post_div", "pll_video_main_clk", "pll_sys_pfd7_clk", };
@@ -510,7 +505,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
 
        clks[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_mux2("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel));
        clks[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel));
-       clks[IMX7D_ARM_M0_ROOT_SRC] = imx_clk_mux2("arm_m0_src", base + 0x8100, 24, 3, arm_m0_sel, ARRAY_SIZE(arm_m0_sel));
        clks[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel));
        clks[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel));
        clks[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_mux2("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel));
@@ -582,7 +576,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
 
        clks[IMX7D_ARM_A7_ROOT_CG] = imx_clk_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28);
        clks[IMX7D_ARM_M4_ROOT_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
-       clks[IMX7D_ARM_M0_ROOT_CG] = imx_clk_gate3("arm_m0_cg", "arm_m0_src", base + 0x8100, 28);
        clks[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_gate3("axi_cg", "axi_src", base + 0x8800, 28);
        clks[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_gate3("disp_axi_cg", "disp_axi_src", base + 0x8880, 28);
        clks[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_gate3("enet_axi_cg", "enet_axi_src", base + 0x8900, 28);
@@ -721,7 +714,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
 
        clks[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_divider2("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3);
        clks[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
-       clks[IMX7D_ARM_M0_ROOT_DIV] = imx_clk_divider2("arm_m0_div", "arm_m0_cg", base + 0x8100, 0, 3);
        clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6);
        clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6);
        clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6);
@@ -793,11 +785,10 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
 
        clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate4("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0);
        clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0);
-       clks[IMX7D_ARM_M0_ROOT_CLK] = imx_clk_gate4("arm_m0_root_clk", "arm_m0_div", base + 0x4020, 0);
        clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0);
        clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0);
        clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0);
-       clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0);
+       clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0);
        clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0);
        clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0);
        clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0);
index e47a1c2fe8bd01d217ad9adb7c380e9bdab9ec5e..4ba9973d4c1878bff6bae24ca680cc85eb463502 100644 (file)
@@ -107,7 +107,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
        return ull;
 }
 
-static struct clk_ops clk_pllv1_ops = {
+static const struct clk_ops clk_pllv1_ops = {
        .recalc_rate = clk_pllv1_recalc_rate,
 };
 
index 9842d657e974e71539a690fe0b778a82ad3c16dd..85b5cbe9744caff244e41634db93d619ada1123c 100644 (file)
@@ -227,7 +227,7 @@ static void clk_pllv2_unprepare(struct clk_hw *hw)
        __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
 }
 
-static struct clk_ops clk_pllv2_ops = {
+static const struct clk_ops clk_pllv2_ops = {
        .prepare = clk_pllv2_prepare,
        .unprepare = clk_pllv2_unprepare,
        .recalc_rate = clk_pllv2_recalc_rate,
index 28739a9a6e37da73a084196e94436e4d4bf95ed4..59dc0aad553cf00c7b63da33c18d2a71575771df 100644 (file)
@@ -50,6 +50,56 @@ config COMMON_CLK_MT2701_BDPSYS
        ---help---
          This driver supports Mediatek MT2701 bdpsys clocks.
 
+config COMMON_CLK_MT2712
+       bool "Clock driver for Mediatek MT2712"
+       depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+       select COMMON_CLK_MEDIATEK
+       default ARCH_MEDIATEK && ARM64
+       ---help---
+         This driver supports Mediatek MT2712 basic clocks.
+
+config COMMON_CLK_MT2712_BDPSYS
+       bool "Clock driver for Mediatek MT2712 bdpsys"
+       depends on COMMON_CLK_MT2712
+       ---help---
+         This driver supports Mediatek MT2712 bdpsys clocks.
+
+config COMMON_CLK_MT2712_IMGSYS
+       bool "Clock driver for Mediatek MT2712 imgsys"
+       depends on COMMON_CLK_MT2712
+       ---help---
+         This driver supports Mediatek MT2712 imgsys clocks.
+
+config COMMON_CLK_MT2712_JPGDECSYS
+       bool "Clock driver for Mediatek MT2712 jpgdecsys"
+       depends on COMMON_CLK_MT2712
+       ---help---
+         This driver supports Mediatek MT2712 jpgdecsys clocks.
+
+config COMMON_CLK_MT2712_MFGCFG
+       bool "Clock driver for Mediatek MT2712 mfgcfg"
+       depends on COMMON_CLK_MT2712
+       ---help---
+         This driver supports Mediatek MT2712 mfgcfg clocks.
+
+config COMMON_CLK_MT2712_MMSYS
+       bool "Clock driver for Mediatek MT2712 mmsys"
+       depends on COMMON_CLK_MT2712
+       ---help---
+         This driver supports Mediatek MT2712 mmsys clocks.
+
+config COMMON_CLK_MT2712_VDECSYS
+       bool "Clock driver for Mediatek MT2712 vdecsys"
+       depends on COMMON_CLK_MT2712
+       ---help---
+         This driver supports Mediatek MT2712 vdecsys clocks.
+
+config COMMON_CLK_MT2712_VENCSYS
+       bool "Clock driver for Mediatek MT2712 vencsys"
+       depends on COMMON_CLK_MT2712
+       ---help---
+         This driver supports Mediatek MT2712 vencsys clocks.
+
 config COMMON_CLK_MT6797
        bool "Clock driver for Mediatek MT6797"
        depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -82,6 +132,36 @@ config COMMON_CLK_MT6797_VENCSYS
        ---help---
          This driver supports Mediatek MT6797 vencsys clocks.
 
+config COMMON_CLK_MT7622
+       bool "Clock driver for MediaTek MT7622"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       select COMMON_CLK_MEDIATEK
+       default ARCH_MEDIATEK
+       ---help---
+         This driver supports MediaTek MT7622 basic clocks and clocks
+         required for various peripherals found on MediaTek SoCs.
+
+config COMMON_CLK_MT7622_ETHSYS
+       bool "Clock driver for MediaTek MT7622 ETHSYS"
+       depends on COMMON_CLK_MT7622
+       ---help---
+         This driver adds support for the Ethernet and SGMII clocks
+         required on the MediaTek MT7622 SoC.
+
+config COMMON_CLK_MT7622_HIFSYS
+       bool "Clock driver for MediaTek MT7622 HIFSYS"
+       depends on COMMON_CLK_MT7622
+       ---help---
+         This driver supports the MediaTek MT7622 HIFSYS clocks supplied
+         to PCI-E and USB.
+
+config COMMON_CLK_MT7622_AUDSYS
+       bool "Clock driver for MediaTek MT7622 AUDSYS"
+       depends on COMMON_CLK_MT7622
+       ---help---
+         This driver supports the MediaTek MT7622 AUDSYS clocks supplied
+         to audio consumers such as I2S and TDM.
+
 config COMMON_CLK_MT8135
        bool "Clock driver for Mediatek MT8135"
        depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
index ba2a070765f09c9ee3bf1936c94fdb387c974c4f..c421ffcd49ffd1218eac72a70c08a3342619649c 100644 (file)
@@ -13,5 +13,17 @@ obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
 obj-$(CONFIG_COMMON_CLK_MT2701_IMGSYS) += clk-mt2701-img.o
 obj-$(CONFIG_COMMON_CLK_MT2701_MMSYS) += clk-mt2701-mm.o
 obj-$(CONFIG_COMMON_CLK_MT2701_VDECSYS) += clk-mt2701-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712) += clk-mt2712.o
+obj-$(CONFIG_COMMON_CLK_MT2712_BDPSYS) += clk-mt2712-bdp.o
+obj-$(CONFIG_COMMON_CLK_MT2712_IMGSYS) += clk-mt2712-img.o
+obj-$(CONFIG_COMMON_CLK_MT2712_JPGDECSYS) += clk-mt2712-jpgdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712_MFGCFG) += clk-mt2712-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT2712_MMSYS) += clk-mt2712-mm.o
+obj-$(CONFIG_COMMON_CLK_MT2712_VDECSYS) += clk-mt2712-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712_VENCSYS) += clk-mt2712-venc.o
+obj-$(CONFIG_COMMON_CLK_MT7622) += clk-mt7622.o
+obj-$(CONFIG_COMMON_CLK_MT7622_ETHSYS) += clk-mt7622-eth.o
+obj-$(CONFIG_COMMON_CLK_MT7622_HIFSYS) += clk-mt7622-hif.o
+obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
 obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
 obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
index 9598889f972b0dd7b163354861a508e8ad8e3ce0..8e7f16fd87c93b80f586d2f223c95c38a10be967 100644 (file)
@@ -750,7 +750,7 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = {
 
 static struct clk_onecell_data *infra_clk_data;
 
-static void mtk_infrasys_init_early(struct device_node *node)
+static void __init mtk_infrasys_init_early(struct device_node *node)
 {
        int r, i;
 
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
new file mode 100644 (file)
index 0000000..5fe4728
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs bdp_cg_regs = {
+       .set_ofs = 0x100,
+       .clr_ofs = 0x100,
+       .sta_ofs = 0x100,
+};
+
+#define GATE_BDP(_id, _name, _parent, _shift) {        \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &bdp_cg_regs,                   \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,     \
+       }
+
+static const struct mtk_gate bdp_clks[] = {
+       GATE_BDP(CLK_BDP_BRIDGE_B, "bdp_bridge_b", "mm_sel", 0),
+       GATE_BDP(CLK_BDP_BRIDGE_DRAM, "bdp_bridge_d", "mm_sel", 1),
+       GATE_BDP(CLK_BDP_LARB_DRAM, "bdp_larb_d", "mm_sel", 2),
+       GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_PXL, "bdp_vdi_pxl", "tvd_sel", 3),
+       GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_DRAM, "bdp_vdi_d", "mm_sel", 4),
+       GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_B, "bdp_vdi_b", "mm_sel", 5),
+       GATE_BDP(CLK_BDP_MT_B, "bdp_fmt_b", "mm_sel", 9),
+       GATE_BDP(CLK_BDP_DISPFMT_27M, "bdp_27m", "di_sel", 10),
+       GATE_BDP(CLK_BDP_DISPFMT_27M_VDOUT, "bdp_27m_vdout", "di_sel", 11),
+       GATE_BDP(CLK_BDP_DISPFMT_27_74_74, "bdp_27_74_74", "di_sel", 12),
+       GATE_BDP(CLK_BDP_DISPFMT_2FS, "bdp_2fs", "di_sel", 13),
+       GATE_BDP(CLK_BDP_DISPFMT_2FS_2FS74_148, "bdp_2fs74_148", "di_sel", 14),
+       GATE_BDP(CLK_BDP_DISPFMT_B, "bdp_b", "mm_sel", 15),
+       GATE_BDP(CLK_BDP_VDO_DRAM, "bdp_vdo_d", "mm_sel", 16),
+       GATE_BDP(CLK_BDP_VDO_2FS, "bdp_vdo_2fs", "di_sel", 17),
+       GATE_BDP(CLK_BDP_VDO_B, "bdp_vdo_b", "mm_sel", 18),
+       GATE_BDP(CLK_BDP_WR_CHANNEL_DI_PXL, "bdp_di_pxl", "di_sel", 19),
+       GATE_BDP(CLK_BDP_WR_CHANNEL_DI_DRAM, "bdp_di_d", "mm_sel", 20),
+       GATE_BDP(CLK_BDP_WR_CHANNEL_DI_B, "bdp_di_b", "mm_sel", 21),
+       GATE_BDP(CLK_BDP_NR_AGENT, "bdp_nr_agent", "nr_sel", 22),
+       GATE_BDP(CLK_BDP_NR_DRAM, "bdp_nr_d", "mm_sel", 23),
+       GATE_BDP(CLK_BDP_NR_B, "bdp_nr_b", "mm_sel", 24),
+       GATE_BDP(CLK_BDP_BRIDGE_RT_B, "bdp_bridge_rt_b", "mm_sel", 25),
+       GATE_BDP(CLK_BDP_BRIDGE_RT_DRAM, "bdp_bridge_rt_d", "mm_sel", 26),
+       GATE_BDP(CLK_BDP_LARB_RT_DRAM, "bdp_larb_rt_d", "mm_sel", 27),
+       GATE_BDP(CLK_BDP_TVD_TDC, "bdp_tvd_tdc", "mm_sel", 28),
+       GATE_BDP(CLK_BDP_TVD_54, "bdp_tvd_clk_54", "tvd_sel", 29),
+       GATE_BDP(CLK_BDP_TVD_CBUS, "bdp_tvd_cbus", "mm_sel", 30),
+};
+
+static int clk_mt2712_bdp_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_BDP_NR_CLK);
+
+       mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_bdp[] = {
+       { .compatible = "mediatek,mt2712-bdpsys", },
+       {}
+};
+
+static struct platform_driver clk_mt2712_bdp_drv = {
+       .probe = clk_mt2712_bdp_probe,
+       .driver = {
+               .name = "clk-mt2712-bdp",
+               .of_match_table = of_match_clk_mt2712_bdp,
+       },
+};
+
+builtin_platform_driver(clk_mt2712_bdp_drv);
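
The MT2712 subsystem drivers added below all follow the same pattern as this bdpsys driver: a table of mtk_gate entries, registration through mtk_clk_register_gates(), and an of_clk_src_onecell_get provider. Nothing special is required on the consumer side; a hedged sketch of a driver picking up one of these gates through the generic clk API (the foo_* name and the DT example are made up for illustration):

static int foo_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* First entry of the node's "clocks" property, e.g.
	 * clocks = <&bdpsys CLK_BDP_BRIDGE_B>; */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* toggles the gate bit defined in bdp_clks[] */
	if (ret)
		return ret;

	/* ... program the hardware, then release the clock when done ... */

	clk_disable_unprepare(clk);
	return 0;
}
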
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
new file mode 100644 (file)
index 0000000..139ff55
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+       .set_ofs = 0x0,
+       .clr_ofs = 0x0,
+       .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) {        \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &img_cg_regs,                   \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,     \
+       }
+
+static const struct mtk_gate img_clks[] = {
+       GATE_IMG(CLK_IMG_SMI_LARB2, "img_smi_larb2", "mm_sel", 0),
+       GATE_IMG(CLK_IMG_SENINF_SCAM_EN, "img_scam_en", "csi0", 3),
+       GATE_IMG(CLK_IMG_SENINF_CAM_EN, "img_cam_en", "mm_sel", 8),
+       GATE_IMG(CLK_IMG_CAM_SV_EN, "img_cam_sv_en", "mm_sel", 9),
+       GATE_IMG(CLK_IMG_CAM_SV1_EN, "img_cam_sv1_en", "mm_sel", 10),
+       GATE_IMG(CLK_IMG_CAM_SV2_EN, "img_cam_sv2_en", "mm_sel", 11),
+};
+
+static int clk_mt2712_img_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+       mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_img[] = {
+       { .compatible = "mediatek,mt2712-imgsys", },
+       {}
+};
+
+static struct platform_driver clk_mt2712_img_drv = {
+       .probe = clk_mt2712_img_probe,
+       .driver = {
+               .name = "clk-mt2712-img",
+               .of_match_table = of_match_clk_mt2712_img,
+       },
+};
+
+builtin_platform_driver(clk_mt2712_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
new file mode 100644 (file)
index 0000000..c7d4aad
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs jpgdec_cg_regs = {
+       .set_ofs = 0x4,
+       .clr_ofs = 0x8,
+       .sta_ofs = 0x0,
+};
+
+#define GATE_JPGDEC(_id, _name, _parent, _shift) {     \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &jpgdec_cg_regs,                        \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr_inv,    \
+       }
+
+static const struct mtk_gate jpgdec_clks[] = {
+       GATE_JPGDEC(CLK_JPGDEC_JPGDEC1, "jpgdec_jpgdec1", "jpgdec_sel", 0),
+       GATE_JPGDEC(CLK_JPGDEC_JPGDEC, "jpgdec_jpgdec", "jpgdec_sel", 4),
+};
+
+static int clk_mt2712_jpgdec_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_JPGDEC_NR_CLK);
+
+       mtk_clk_register_gates(node, jpgdec_clks, ARRAY_SIZE(jpgdec_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_jpgdec[] = {
+       { .compatible = "mediatek,mt2712-jpgdecsys", },
+       {}
+};
+
+static struct platform_driver clk_mt2712_jpgdec_drv = {
+       .probe = clk_mt2712_jpgdec_probe,
+       .driver = {
+               .name = "clk-mt2712-jpgdec",
+               .of_match_table = of_match_clk_mt2712_jpgdec,
+       },
+};
+
+builtin_platform_driver(clk_mt2712_jpgdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
new file mode 100644 (file)
index 0000000..570f72d
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+       .set_ofs = 0x4,
+       .clr_ofs = 0x8,
+       .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) {        \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &mfg_cg_regs,                   \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr,        \
+       }
+
+static const struct mtk_gate mfg_clks[] = {
+       GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
+};
+
+static int clk_mt2712_mfg_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+
+       mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_mfg[] = {
+       { .compatible = "mediatek,mt2712-mfgcfg", },
+       {}
+};
+
+static struct platform_driver clk_mt2712_mfg_drv = {
+       .probe = clk_mt2712_mfg_probe,
+       .driver = {
+               .name = "clk-mt2712-mfg",
+               .of_match_table = of_match_clk_mt2712_mfg,
+       },
+};
+
+builtin_platform_driver(clk_mt2712_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
new file mode 100644 (file)
index 0000000..a8b4b6d
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+       .set_ofs = 0x104,
+       .clr_ofs = 0x108,
+       .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+       .set_ofs = 0x114,
+       .clr_ofs = 0x118,
+       .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm2_cg_regs = {
+       .set_ofs = 0x224,
+       .clr_ofs = 0x228,
+       .sta_ofs = 0x220,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) {        \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &mm0_cg_regs,                   \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr,        \
+       }
+
+#define GATE_MM1(_id, _name, _parent, _shift) {        \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &mm1_cg_regs,                   \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr,        \
+       }
+
+#define GATE_MM2(_id, _name, _parent, _shift) {        \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &mm2_cg_regs,                   \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr,        \
+       }
+
+static const struct mtk_gate mm_clks[] = {
+       /* MM0 */
+       GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+       GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+       GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+       GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+       GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+       GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+       GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+       GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+       GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+       GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+       GATE_MM0(CLK_MM_MDP_CROP, "mm_mdp_crop", "mm_sel", 10),
+       GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+       GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+       GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+       GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+       GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "clk32k", 15),
+       GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+       GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+       GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+       GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+       GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+       GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+       GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+       GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+       GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+       GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+       GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+       GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+       GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+       GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+       /* MM1 */
+       GATE_MM1(CLK_MM_DISP_PWM0_MM, "mm_pwm0_mm", "mm_sel", 0),
+       GATE_MM1(CLK_MM_DISP_PWM0_26M, "mm_pwm0_26m", "pwm_sel", 1),
+       GATE_MM1(CLK_MM_DISP_PWM1_MM, "mm_pwm1_mm", "mm_sel", 2),
+       GATE_MM1(CLK_MM_DISP_PWM1_26M, "mm_pwm1_26m", "pwm_sel", 3),
+       GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+       GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_lntc", 5),
+       GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+       GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_lntc", 7),
+       GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "vpll_dpix", 8),
+       GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+       GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "vpll3_dpix", 10),
+       GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
+       GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "vpll_dpix", 16),
+       GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvdstx", 17),
+       GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
+       GATE_MM1(CLK_MM_SMI_COMMON1, "mm_smi_common1", "mm_sel", 21),
+       GATE_MM1(CLK_MM_SMI_LARB5, "mm_smi_larb5", "mm_sel", 22),
+       GATE_MM1(CLK_MM_MDP_RDMA2, "mm_mdp_rdma2", "mm_sel", 23),
+       GATE_MM1(CLK_MM_MDP_TDSHP2, "mm_mdp_tdshp2", "mm_sel", 24),
+       GATE_MM1(CLK_MM_DISP_OVL2, "mm_disp_ovl2", "mm_sel", 25),
+       GATE_MM1(CLK_MM_DISP_WDMA2, "mm_disp_wdma2", "mm_sel", 26),
+       GATE_MM1(CLK_MM_DISP_COLOR2, "mm_disp_color2", "mm_sel", 27),
+       GATE_MM1(CLK_MM_DISP_AAL1, "mm_disp_aal1", "mm_sel", 28),
+       GATE_MM1(CLK_MM_DISP_OD1, "mm_disp_od1", "mm_sel", 29),
+       GATE_MM1(CLK_MM_LVDS1_PIXEL, "mm_lvds1_pixel", "vpll3_dpix", 30),
+       GATE_MM1(CLK_MM_LVDS1_CTS, "mm_lvds1_cts", "lvdstx3", 31),
+       /* MM2 */
+       GATE_MM2(CLK_MM_SMI_LARB7, "mm_smi_larb7", "mm_sel", 0),
+       GATE_MM2(CLK_MM_MDP_RDMA3, "mm_mdp_rdma3", "mm_sel", 1),
+       GATE_MM2(CLK_MM_MDP_WROT2, "mm_mdp_wrot2", "mm_sel", 2),
+       GATE_MM2(CLK_MM_DSI2, "mm_dsi2", "mm_sel", 3),
+       GATE_MM2(CLK_MM_DSI2_DIGITAL, "mm_dsi2_digital", "dsi0_lntc", 4),
+       GATE_MM2(CLK_MM_DSI3, "mm_dsi3", "mm_sel", 5),
+       GATE_MM2(CLK_MM_DSI3_DIGITAL, "mm_dsi3_digital", "dsi1_lntc", 6),
+};
+
+static int clk_mt2712_mm_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+       mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_mm[] = {
+       { .compatible = "mediatek,mt2712-mmsys", },
+       {}
+};
+
+static struct platform_driver clk_mt2712_mm_drv = {
+       .probe = clk_mt2712_mm_probe,
+       .driver = {
+               .name = "clk-mt2712-mm",
+               .of_match_table = of_match_clk_mt2712_mm,
+       },
+};
+
+builtin_platform_driver(clk_mt2712_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
new file mode 100644 (file)
index 0000000..55c64ee
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+       .set_ofs = 0x0,
+       .clr_ofs = 0x4,
+       .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+       .set_ofs = 0x8,
+       .clr_ofs = 0xc,
+       .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) {      \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &vdec0_cg_regs,                 \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr_inv,    \
+       }
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) {      \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &vdec1_cg_regs,                 \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr_inv,    \
+       }
+
+static const struct mtk_gate vdec_clks[] = {
+       /* VDEC0 */
+       GATE_VDEC0(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", 0),
+       /* VDEC1 */
+       GATE_VDEC1(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "vdec_sel", 0),
+       GATE_VDEC1(CLK_VDEC_IMGRZ_CKEN, "vdec_imgrz_cken", "vdec_sel", 1),
+};
+
+static int clk_mt2712_vdec_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+       mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_vdec[] = {
+       { .compatible = "mediatek,mt2712-vdecsys", },
+       {}
+};
+
+static struct platform_driver clk_mt2712_vdec_drv = {
+       .probe = clk_mt2712_vdec_probe,
+       .driver = {
+               .name = "clk-mt2712-vdec",
+               .of_match_table = of_match_clk_mt2712_vdec,
+       },
+};
+
+builtin_platform_driver(clk_mt2712_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
new file mode 100644 (file)
index 0000000..ccbfe98
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+       .set_ofs = 0x4,
+       .clr_ofs = 0x8,
+       .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) {       \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &venc_cg_regs,                  \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr_inv,    \
+       }
+
+static const struct mtk_gate venc_clks[] = {
+       GATE_VENC(CLK_VENC_SMI_COMMON_CON, "venc_smi", "mm_sel", 0),
+       GATE_VENC(CLK_VENC_VENC, "venc_venc", "venc_sel", 4),
+       GATE_VENC(CLK_VENC_SMI_LARB6, "venc_smi_larb6", "jpgdec_sel", 12),
+};
+
+static int clk_mt2712_venc_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+       mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_venc[] = {
+       { .compatible = "mediatek,mt2712-vencsys", },
+       {}
+};
+
+static struct platform_driver clk_mt2712_venc_drv = {
+       .probe = clk_mt2712_venc_probe,
+       .driver = {
+               .name = "clk-mt2712-venc",
+               .of_match_table = of_match_clk_mt2712_venc,
+       },
+};
+
+builtin_platform_driver(clk_mt2712_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
new file mode 100644 (file)
index 0000000..498d137
--- /dev/null
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static DEFINE_SPINLOCK(mt2712_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+       FIXED_CLK(CLK_TOP_VPLL3_DPIX, "vpll3_dpix", NULL, 200000000),
+       FIXED_CLK(CLK_TOP_VPLL_DPIX, "vpll_dpix", NULL, 200000000),
+       FIXED_CLK(CLK_TOP_LTEPLL_FS26M, "ltepll_fs26m", NULL, 26000000),
+       FIXED_CLK(CLK_TOP_DMPLL, "dmpll_ck", NULL, 350000000),
+       FIXED_CLK(CLK_TOP_DSI0_LNTC, "dsi0_lntc", NULL, 143000000),
+       FIXED_CLK(CLK_TOP_DSI1_LNTC, "dsi1_lntc", NULL, 143000000),
+       FIXED_CLK(CLK_TOP_LVDSTX3_CLKDIG_CTS, "lvdstx3", NULL, 140000000),
+       FIXED_CLK(CLK_TOP_LVDSTX_CLKDIG_CTS, "lvdstx", NULL, 140000000),
+       FIXED_CLK(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", NULL, 32768),
+       FIXED_CLK(CLK_TOP_CLKRTC_INT, "clkrtc_int", NULL, 32747),
+       FIXED_CLK(CLK_TOP_CSI0, "csi0", NULL, 26000000),
+       FIXED_CLK(CLK_TOP_CVBSPLL, "cvbspll", NULL, 108000000),
+};
+
+static const struct mtk_fixed_factor top_early_divs[] = {
+       FACTOR(CLK_TOP_SYS_26M, "sys_26m", "clk26m", 1,
+               1),
+       FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "sys_26m", 1,
+               2),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+       FACTOR(CLK_TOP_ARMCA35PLL, "armca35pll_ck", "armca35pll", 1,
+               1),
+       FACTOR(CLK_TOP_ARMCA35PLL_600M, "armca35pll_600m", "armca35pll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_ARMCA35PLL_400M, "armca35pll_400m", "armca35pll_ck", 1,
+               3),
+       FACTOR(CLK_TOP_ARMCA72PLL, "armca72pll_ck", "armca72pll", 1,
+               1),
+       FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1,
+               1),
+       FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1,
+               2),
+       FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1,
+               4),
+       FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1,
+               8),
+       FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1,
+               16),
+       FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "syspll_ck", 1,
+               3),
+       FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1,
+               2),
+       FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1,
+               4),
+       FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "syspll_ck", 1,
+               5),
+       FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1,
+               2),
+       FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1,
+               4),
+       FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "syspll_ck", 1,
+               7),
+       FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1,
+               2),
+       FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1,
+               4),
+       FACTOR(CLK_TOP_UNIVPLL, "univpll_ck", "univpll", 1,
+               1),
+       FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll_ck", 1,
+               7),
+       FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll_ck", 1,
+               26),
+       FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univpll_ck", 1,
+               52),
+       FACTOR(CLK_TOP_UNIVPLL_D104, "univpll_d104", "univpll_ck", 1,
+               104),
+       FACTOR(CLK_TOP_UNIVPLL_D208, "univpll_d208", "univpll_ck", 1,
+               208),
+       FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1,
+               2),
+       FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1,
+               4),
+       FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_d2", 1,
+               8),
+       FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll_ck", 1,
+               3),
+       FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1,
+               2),
+       FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1,
+               4),
+       FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1,
+               8),
+       FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll_ck", 1,
+               5),
+       FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1,
+               2),
+       FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1,
+               4),
+       FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univpll_d5", 1,
+               8),
+       FACTOR(CLK_TOP_F_MP0_PLL1, "f_mp0_pll1_ck", "univpll_d2", 1,
+               1),
+       FACTOR(CLK_TOP_F_MP0_PLL2, "f_mp0_pll2_ck", "univpll1_d2", 1,
+               1),
+       FACTOR(CLK_TOP_F_BIG_PLL1, "f_big_pll1_ck", "univpll_d2", 1,
+               1),
+       FACTOR(CLK_TOP_F_BIG_PLL2, "f_big_pll2_ck", "univpll1_d2", 1,
+               1),
+       FACTOR(CLK_TOP_F_BUS_PLL1, "f_bus_pll1_ck", "univpll_d2", 1,
+               1),
+       FACTOR(CLK_TOP_F_BUS_PLL2, "f_bus_pll2_ck", "univpll1_d2", 1,
+               1),
+       FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1,
+               1),
+       FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1,
+               2),
+       FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1,
+               4),
+       FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1,
+               8),
+       FACTOR(CLK_TOP_APLL1_D16, "apll1_d16", "apll1_ck", 1,
+               16),
+       FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1,
+               1),
+       FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1,
+               2),
+       FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2_ck", 1,
+               4),
+       FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2_ck", 1,
+               8),
+       FACTOR(CLK_TOP_APLL2_D16, "apll2_d16", "apll2_ck", 1,
+               16),
+       FACTOR(CLK_TOP_LVDSPLL, "lvdspll_ck", "lvdspll", 1,
+               1),
+       FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll_ck", 1,
+               4),
+       FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll_ck", 1,
+               8),
+       FACTOR(CLK_TOP_LVDSPLL2, "lvdspll2_ck", "lvdspll2", 1,
+               1),
+       FACTOR(CLK_TOP_LVDSPLL2_D2, "lvdspll2_d2", "lvdspll2_ck", 1,
+               2),
+       FACTOR(CLK_TOP_LVDSPLL2_D4, "lvdspll2_d4", "lvdspll2_ck", 1,
+               4),
+       FACTOR(CLK_TOP_LVDSPLL2_D8, "lvdspll2_d8", "lvdspll2_ck", 1,
+               8),
+       FACTOR(CLK_TOP_ETHERPLL_125M, "etherpll_125m", "etherpll", 1,
+               1),
+       FACTOR(CLK_TOP_ETHERPLL_50M, "etherpll_50m", "etherpll", 1,
+               1),
+       FACTOR(CLK_TOP_CVBS, "cvbs", "cvbspll", 1,
+               1),
+       FACTOR(CLK_TOP_CVBS_D2, "cvbs_d2", "cvbs", 1,
+               2),
+       FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1,
+               1),
+       FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1,
+               1),
+       FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1,
+               1),
+       FACTOR(CLK_TOP_VCODECPLL_D2, "vcodecpll_d2", "vcodecpll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1,
+               1),
+       FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_ck", 1,
+               4),
+       FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_ck", 1,
+               8),
+       FACTOR(CLK_TOP_TVDPLL_429M, "tvdpll_429m", "tvdpll", 1,
+               1),
+       FACTOR(CLK_TOP_TVDPLL_429M_D2, "tvdpll_429m_d2", "tvdpll_429m", 1,
+               2),
+       FACTOR(CLK_TOP_TVDPLL_429M_D4, "tvdpll_429m_d4", "tvdpll_429m", 1,
+               4),
+       FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1,
+               1),
+       FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1,
+               2),
+       FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll_ck", 1,
+               4),
+       FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1,
+               1),
+       FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2_ck", 1,
+               2),
+       FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2_ck", 1,
+               4),
+       FACTOR(CLK_TOP_D2A_ULCLK_6P5M, "d2a_ulclk_6p5m", "clk26m", 1,
+               4),
+};
+
+static const char * const axi_parents[] = {
+       "clk26m",
+       "syspll1_d2",
+       "syspll_d5",
+       "syspll1_d4",
+       "univpll_d5",
+       "univpll2_d2",
+       "msdcpll2_ck"
+};
+
+static const char * const mem_parents[] = {
+       "clk26m",
+       "dmpll_ck"
+};
+
+static const char * const mm_parents[] = {
+       "clk26m",
+       "vencpll_ck",
+       "syspll_d3",
+       "syspll1_d2",
+       "syspll_d5",
+       "syspll1_d4",
+       "univpll1_d2",
+       "univpll2_d2"
+};
+
+static const char * const pwm_parents[] = {
+       "clk26m",
+       "univpll2_d4",
+       "univpll3_d2",
+       "univpll1_d4"
+};
+
+static const char * const vdec_parents[] = {
+       "clk26m",
+       "vcodecpll_ck",
+       "tvdpll_429m",
+       "univpll_d3",
+       "vencpll_ck",
+       "syspll_d3",
+       "univpll1_d2",
+       "mmpll_d2",
+       "syspll3_d2",
+       "tvdpll_ck"
+};
+
+static const char * const venc_parents[] = {
+       "clk26m",
+       "univpll1_d2",
+       "mmpll_d2",
+       "tvdpll_d2",
+       "syspll1_d2",
+       "univpll_d5",
+       "vcodecpll_d2",
+       "univpll2_d2",
+       "syspll3_d2"
+};
+
+static const char * const mfg_parents[] = {
+       "clk26m",
+       "mmpll_ck",
+       "univpll_d3",
+       "clk26m",
+       "clk26m",
+       "clk26m",
+       "clk26m",
+       "clk26m",
+       "clk26m",
+       "syspll_d3",
+       "syspll1_d2",
+       "syspll_d5",
+       "univpll_d3",
+       "univpll1_d2",
+       "univpll_d5",
+       "univpll2_d2"
+};
+
+static const char * const camtg_parents[] = {
+       "clk26m",
+       "univpll_d52",
+       "univpll_d208",
+       "univpll_d104",
+       "clk26m_d2",
+       "univpll_d26",
+       "univpll2_d8",
+       "syspll3_d4",
+       "syspll3_d2",
+       "univpll1_d4",
+       "univpll2_d2"
+};
+
+static const char * const uart_parents[] = {
+       "clk26m",
+       "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+       "clk26m",
+       "univpll2_d4",
+       "univpll1_d4",
+       "univpll2_d2",
+       "univpll3_d2",
+       "univpll1_d8"
+};
+
+static const char * const usb20_parents[] = {
+       "clk26m",
+       "univpll1_d8",
+       "univpll3_d4"
+};
+
+static const char * const usb30_parents[] = {
+       "clk26m",
+       "univpll3_d2",
+       "univpll3_d4",
+       "univpll2_d4"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+       "clk26m",
+       "syspll1_d2",
+       "syspll2_d2",
+       "syspll4_d2",
+       "univpll_d5",
+       "univpll1_d4"
+};
+
+static const char * const msdc50_0_parents[] = {
+       "clk26m",
+       "msdcpll_ck",
+       "msdcpll_d2",
+       "univpll1_d4",
+       "syspll2_d2",
+       "msdcpll_d4",
+       "vencpll_d2",
+       "univpll1_d2",
+       "msdcpll2_ck",
+       "msdcpll2_d2",
+       "msdcpll2_d4"
+};
+
+static const char * const msdc30_1_parents[] = {
+       "clk26m",
+       "univpll2_d2",
+       "msdcpll_d2",
+       "univpll1_d4",
+       "syspll2_d2",
+       "univpll_d7",
+       "vencpll_d2"
+};
+
+static const char * const msdc30_3_parents[] = {
+       "clk26m",
+       "msdcpll2_ck",
+       "msdcpll2_d2",
+       "univpll2_d2",
+       "msdcpll2_d4",
+       "univpll1_d4",
+       "syspll2_d2",
+       "syspll_d7",
+       "univpll_d7",
+       "vencpll_d2",
+       "msdcpll_ck",
+       "msdcpll_d2",
+       "msdcpll_d4"
+};
+
+static const char * const audio_parents[] = {
+       "clk26m",
+       "syspll3_d4",
+       "syspll4_d4",
+       "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+       "clk26m",
+       "syspll1_d4",
+       "syspll4_d2",
+       "univpll3_d2",
+       "univpll2_d8",
+       "syspll3_d2",
+       "syspll3_d4"
+};
+
+static const char * const pmicspi_parents[] = {
+       "clk26m",
+       "syspll1_d8",
+       "syspll3_d4",
+       "syspll1_d16",
+       "univpll3_d4",
+       "univpll_d26",
+       "syspll3_d4"
+};
+
+static const char * const dpilvds1_parents[] = {
+       "clk26m",
+       "lvdspll2_ck",
+       "lvdspll2_d2",
+       "lvdspll2_d4",
+       "lvdspll2_d8",
+       "clkfpc"
+};
+
+static const char * const atb_parents[] = {
+       "clk26m",
+       "syspll1_d2",
+       "univpll_d5",
+       "syspll_d5"
+};
+
+static const char * const nr_parents[] = {
+       "clk26m",
+       "univpll1_d4",
+       "syspll2_d2",
+       "syspll1_d4",
+       "univpll1_d8",
+       "univpll3_d2",
+       "univpll2_d2",
+       "syspll_d5"
+};
+
+static const char * const nfi2x_parents[] = {
+       "clk26m",
+       "syspll4_d4",
+       "univpll3_d4",
+       "univpll1_d8",
+       "syspll2_d4",
+       "univpll3_d2",
+       "syspll_d7",
+       "syspll2_d2",
+       "univpll2_d2",
+       "syspll_d5",
+       "syspll1_d2"
+};
+
+static const char * const irda_parents[] = {
+       "clk26m",
+       "univpll2_d4",
+       "syspll2_d4",
+       "univpll2_d8"
+};
+
+static const char * const cci400_parents[] = {
+       "clk26m",
+       "vencpll_ck",
+       "armca35pll_600m",
+       "armca35pll_400m",
+       "univpll_d2",
+       "syspll_d2",
+       "msdcpll_ck",
+       "univpll_d3"
+};
+
+static const char * const aud_1_parents[] = {
+       "clk26m",
+       "apll1_ck",
+       "univpll2_d4",
+       "univpll2_d8"
+};
+
+static const char * const aud_2_parents[] = {
+       "clk26m",
+       "apll2_ck",
+       "univpll2_d4",
+       "univpll2_d8"
+};
+
+static const char * const mem_mfg_parents[] = {
+       "clk26m",
+       "mmpll_ck",
+       "univpll_d3"
+};
+
+static const char * const axi_mfg_parents[] = {
+       "clk26m",
+       "axi_sel",
+       "univpll_d5"
+};
+
+static const char * const scam_parents[] = {
+       "clk26m",
+       "syspll3_d2",
+       "univpll2_d4",
+       "syspll2_d4"
+};
+
+static const char * const nfiecc_parents[] = {
+       "clk26m",
+       "nfi2x_sel",
+       "syspll_d7",
+       "syspll2_d2",
+       "univpll2_d2",
+       "univpll_d5",
+       "syspll1_d2"
+};
+
+static const char * const pe2_mac_p0_parents[] = {
+       "clk26m",
+       "syspll1_d8",
+       "syspll4_d2",
+       "syspll2_d4",
+       "univpll2_d4",
+       "syspll3_d2"
+};
+
+static const char * const dpilvds_parents[] = {
+       "clk26m",
+       "lvdspll_ck",
+       "lvdspll_d2",
+       "lvdspll_d4",
+       "lvdspll_d8",
+       "clkfpc"
+};
+
+static const char * const hdcp_parents[] = {
+       "clk26m",
+       "syspll4_d2",
+       "syspll3_d4",
+       "univpll2_d4"
+};
+
+static const char * const hdcp_24m_parents[] = {
+       "clk26m",
+       "univpll_d26",
+       "univpll_d52",
+       "univpll2_d8"
+};
+
+static const char * const rtc_parents[] = {
+       "clkrtc_int",
+       "clkrtc_ext",
+       "clk26m",
+       "univpll3_d8"
+};
+
+static const char * const spinor_parents[] = {
+       "clk26m",
+       "clk26m_d2",
+       "syspll4_d4",
+       "univpll2_d8",
+       "univpll3_d4",
+       "syspll4_d2",
+       "syspll2_d4",
+       "univpll2_d4",
+       "etherpll_125m",
+       "syspll1_d4"
+};
+
+static const char * const apll_parents[] = {
+       "clk26m",
+       "apll1_ck",
+       "apll1_d2",
+       "apll1_d4",
+       "apll1_d8",
+       "apll1_d16",
+       "apll2_ck",
+       "apll2_d2",
+       "apll2_d4",
+       "apll2_d8",
+       "apll2_d16",
+       "clk26m",
+       "clk26m"
+};
+
+static const char * const a1sys_hp_parents[] = {
+       "clk26m",
+       "apll1_ck",
+       "apll1_d2",
+       "apll1_d4",
+       "apll1_d8"
+};
+
+static const char * const a2sys_hp_parents[] = {
+       "clk26m",
+       "apll2_ck",
+       "apll2_d2",
+       "apll2_d4",
+       "apll2_d8"
+};
+
+static const char * const asm_l_parents[] = {
+       "clk26m",
+       "univpll2_d4",
+       "univpll2_d2",
+       "syspll_d5"
+};
+
+static const char * const i2so1_parents[] = {
+       "clk26m",
+       "apll1_ck",
+       "apll2_ck"
+};
+
+static const char * const ether_125m_parents[] = {
+       "clk26m",
+       "etherpll_125m",
+       "univpll3_d2"
+};
+
+static const char * const ether_50m_parents[] = {
+       "clk26m",
+       "etherpll_50m",
+       "univpll_d26",
+       "univpll3_d4"
+};
+
+static const char * const jpgdec_parents[] = {
+       "clk26m",
+       "univpll_d3",
+       "tvdpll_429m",
+       "vencpll_ck",
+       "syspll_d3",
+       "vcodecpll_ck",
+       "univpll1_d2",
+       "armca35pll_400m",
+       "tvdpll_429m_d2",
+       "tvdpll_429m_d4"
+};
+
+static const char * const spislv_parents[] = {
+       "clk26m",
+       "univpll2_d4",
+       "univpll1_d4",
+       "univpll2_d2",
+       "univpll3_d2",
+       "univpll1_d8",
+       "univpll1_d2",
+       "univpll_d5"
+};
+
+static const char * const ether_parents[] = {
+       "clk26m",
+       "etherpll_50m",
+       "univpll_d26"
+};
+
+static const char * const di_parents[] = {
+       "clk26m",
+       "tvdpll_d2",
+       "tvdpll_d4",
+       "tvdpll_d8",
+       "vencpll_ck",
+       "vencpll_d2",
+       "cvbs",
+       "cvbs_d2"
+};
+
+static const char * const tvd_parents[] = {
+       "clk26m",
+       "cvbs_d2",
+       "univpll2_d8"
+};
+
+static const char * const i2c_parents[] = {
+       "clk26m",
+       "univpll_d26",
+       "univpll2_d4",
+       "univpll3_d2",
+       "univpll1_d4"
+};
+
+static const char * const msdc0p_aes_parents[] = {
+       "clk26m",
+       "msdcpll_ck",
+       "univpll_d3",
+       "vcodecpll_ck"
+};
+
+static const char * const cmsys_parents[] = {
+       "clk26m",
+       "univpll_d3",
+       "syspll_d3",
+       "syspll1_d2",
+       "syspll2_d2"
+};
+
+static const char * const gcpu_parents[] = {
+       "clk26m",
+       "syspll_d3",
+       "syspll1_d2",
+       "univpll1_d2",
+       "univpll_d5",
+       "univpll3_d2",
+       "univpll_d3"
+};
+
+static const char * const aud_apll1_parents[] = {
+       "apll1",
+       "clkaud_ext_i_1"
+};
+
+static const char * const aud_apll2_parents[] = {
+       "apll2",
+       "clkaud_ext_i_2"
+};
+
+static const char * const audull_vtx_parents[] = {
+       "d2a_ulclk_6p5m",
+       "clkaud_ext_i_0"
+};
+
+static struct mtk_composite top_muxes[] = {
+       /* CLK_CFG_0 */
+       MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x040, 0, 3,
+               7, CLK_IS_CRITICAL),
+       MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x040, 8, 1,
+               15, CLK_IS_CRITICAL),
+       MUX_GATE(CLK_TOP_MM_SEL, "mm_sel",
+               mm_parents, 0x040, 24, 3, 31),
+       /* CLK_CFG_1 */
+       MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel",
+               pwm_parents, 0x050, 0, 2, 7),
+       MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel",
+               vdec_parents, 0x050, 8, 4, 15),
+       MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel",
+               venc_parents, 0x050, 16, 4, 23),
+       MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel",
+               mfg_parents, 0x050, 24, 4, 31),
+       /* CLK_CFG_2 */
+       MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel",
+               camtg_parents, 0x060, 0, 4, 7),
+       MUX_GATE(CLK_TOP_UART_SEL, "uart_sel",
+               uart_parents, 0x060, 8, 1, 15),
+       MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel",
+               spi_parents, 0x060, 16, 3, 23),
+       MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel",
+               usb20_parents, 0x060, 24, 2, 31),
+       /* CLK_CFG_3 */
+       MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel",
+               usb30_parents, 0x070, 0, 2, 7),
+       MUX_GATE(CLK_TOP_MSDC50_0_HCLK_SEL, "msdc50_0_h_sel",
+               msdc50_0_h_parents, 0x070, 8, 3, 15),
+       MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
+               msdc50_0_parents, 0x070, 16, 4, 23),
+       MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
+               msdc30_1_parents, 0x070, 24, 3, 31),
+       /* CLK_CFG_4 */
+       MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel",
+               msdc30_1_parents, 0x080, 0, 3, 7),
+       MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel",
+               msdc30_3_parents, 0x080, 8, 4, 15),
+       MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel",
+               audio_parents, 0x080, 16, 2, 23),
+       MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
+               aud_intbus_parents, 0x080, 24, 3, 31),
+       /* CLK_CFG_5 */
+       MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel",
+               pmicspi_parents, 0x090, 0, 3, 7),
+       MUX_GATE(CLK_TOP_DPILVDS1_SEL, "dpilvds1_sel",
+               dpilvds1_parents, 0x090, 8, 3, 15),
+       MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel",
+               atb_parents, 0x090, 16, 2, 23),
+       MUX_GATE(CLK_TOP_NR_SEL, "nr_sel",
+               nr_parents, 0x090, 24, 3, 31),
+       /* CLK_CFG_6 */
+       MUX_GATE(CLK_TOP_NFI2X_SEL, "nfi2x_sel",
+               nfi2x_parents, 0x0a0, 0, 4, 7),
+       MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel",
+               irda_parents, 0x0a0, 8, 2, 15),
+       MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel",
+               cci400_parents, 0x0a0, 16, 3, 23),
+       MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel",
+               aud_1_parents, 0x0a0, 24, 2, 31),
+       /* CLK_CFG_7 */
+       MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel",
+               aud_2_parents, 0x0b0, 0, 2, 7),
+       MUX_GATE(CLK_TOP_MEM_MFG_IN_AS_SEL, "mem_mfg_sel",
+               mem_mfg_parents, 0x0b0, 8, 2, 15),
+       MUX_GATE(CLK_TOP_AXI_MFG_IN_AS_SEL, "axi_mfg_sel",
+               axi_mfg_parents, 0x0b0, 16, 2, 23),
+       MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel",
+               scam_parents, 0x0b0, 24, 2, 31),
+       /* CLK_CFG_8 */
+       MUX_GATE(CLK_TOP_NFIECC_SEL, "nfiecc_sel",
+               nfiecc_parents, 0x0c0, 0, 3, 7),
+       MUX_GATE(CLK_TOP_PE2_MAC_P0_SEL, "pe2_mac_p0_sel",
+               pe2_mac_p0_parents, 0x0c0, 8, 3, 15),
+       MUX_GATE(CLK_TOP_PE2_MAC_P1_SEL, "pe2_mac_p1_sel",
+               pe2_mac_p0_parents, 0x0c0, 16, 3, 23),
+       MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel",
+               dpilvds_parents, 0x0c0, 24, 3, 31),
+       /* CLK_CFG_9 */
+       MUX_GATE(CLK_TOP_MSDC50_3_HCLK_SEL, "msdc50_3_h_sel",
+               msdc50_0_h_parents, 0x0d0, 0, 3, 7),
+       MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel",
+               hdcp_parents, 0x0d0, 8, 2, 15),
+       MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel",
+               hdcp_24m_parents, 0x0d0, 16, 2, 23),
+       MUX_GATE_FLAGS(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents, 0x0d0, 24, 2,
+               31, CLK_IS_CRITICAL),
+       /* CLK_CFG_10 */
+       MUX_GATE(CLK_TOP_SPINOR_SEL, "spinor_sel",
+               spinor_parents, 0x500, 0, 4, 7),
+       MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel",
+               apll_parents, 0x500, 8, 4, 15),
+       MUX_GATE(CLK_TOP_APLL2_SEL, "apll2_sel",
+               apll_parents, 0x500, 16, 4, 23),
+       MUX_GATE(CLK_TOP_A1SYS_HP_SEL, "a1sys_hp_sel",
+               a1sys_hp_parents, 0x500, 24, 3, 31),
+       /* CLK_CFG_11 */
+       MUX_GATE(CLK_TOP_A2SYS_HP_SEL, "a2sys_hp_sel",
+               a2sys_hp_parents, 0x510, 0, 3, 7),
+       MUX_GATE(CLK_TOP_ASM_L_SEL, "asm_l_sel",
+               asm_l_parents, 0x510, 8, 2, 15),
+       MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel",
+               asm_l_parents, 0x510, 16, 2, 23),
+       MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel",
+               asm_l_parents, 0x510, 24, 2, 31),
+       /* CLK_CFG_12 */
+       MUX_GATE(CLK_TOP_I2SO1_SEL, "i2so1_sel",
+               i2so1_parents, 0x520, 0, 2, 7),
+       MUX_GATE(CLK_TOP_I2SO2_SEL, "i2so2_sel",
+               i2so1_parents, 0x520, 8, 2, 15),
+       MUX_GATE(CLK_TOP_I2SO3_SEL, "i2so3_sel",
+               i2so1_parents, 0x520, 16, 2, 23),
+       MUX_GATE(CLK_TOP_TDMO0_SEL, "tdmo0_sel",
+               i2so1_parents, 0x520, 24, 2, 31),
+       /* CLK_CFG_13 */
+       MUX_GATE(CLK_TOP_TDMO1_SEL, "tdmo1_sel",
+               i2so1_parents, 0x530, 0, 2, 7),
+       MUX_GATE(CLK_TOP_I2SI1_SEL, "i2si1_sel",
+               i2so1_parents, 0x530, 8, 2, 15),
+       MUX_GATE(CLK_TOP_I2SI2_SEL, "i2si2_sel",
+               i2so1_parents, 0x530, 16, 2, 23),
+       MUX_GATE(CLK_TOP_I2SI3_SEL, "i2si3_sel",
+               i2so1_parents, 0x530, 24, 2, 31),
+       /* CLK_CFG_14 */
+       MUX_GATE(CLK_TOP_ETHER_125M_SEL, "ether_125m_sel",
+               ether_125m_parents, 0x540, 0, 2, 7),
+       MUX_GATE(CLK_TOP_ETHER_50M_SEL, "ether_50m_sel",
+               ether_50m_parents, 0x540, 8, 2, 15),
+       MUX_GATE(CLK_TOP_JPGDEC_SEL, "jpgdec_sel",
+               jpgdec_parents, 0x540, 16, 4, 23),
+       MUX_GATE(CLK_TOP_SPISLV_SEL, "spislv_sel",
+               spislv_parents, 0x540, 24, 3, 31),
+       /* CLK_CFG_15 */
+       MUX_GATE(CLK_TOP_ETHER_50M_RMII_SEL, "ether_sel",
+               ether_parents, 0x550, 0, 2, 7),
+       MUX_GATE(CLK_TOP_CAM2TG_SEL, "cam2tg_sel",
+               camtg_parents, 0x550, 8, 4, 15),
+       MUX_GATE(CLK_TOP_DI_SEL, "di_sel",
+               di_parents, 0x550, 16, 3, 23),
+       MUX_GATE(CLK_TOP_TVD_SEL, "tvd_sel",
+               tvd_parents, 0x550, 24, 2, 31),
+       /* CLK_CFG_16 */
+       MUX_GATE(CLK_TOP_I2C_SEL, "i2c_sel",
+               i2c_parents, 0x560, 0, 3, 7),
+       MUX_GATE(CLK_TOP_PWM_INFRA_SEL, "pwm_infra_sel",
+               pwm_parents, 0x560, 8, 2, 15),
+       MUX_GATE(CLK_TOP_MSDC0P_AES_SEL, "msdc0p_aes_sel",
+               msdc0p_aes_parents, 0x560, 16, 2, 23),
+       MUX_GATE(CLK_TOP_CMSYS_SEL, "cmsys_sel",
+               cmsys_parents, 0x560, 24, 3, 31),
+       /* CLK_CFG_17 */
+       MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel",
+               gcpu_parents, 0x570, 0, 3, 7),
+       /* CLK_AUDDIV_4 */
+       MUX(CLK_TOP_AUD_APLL1_SEL, "aud_apll1_sel",
+               aud_apll1_parents, 0x134, 0, 1),
+       MUX(CLK_TOP_AUD_APLL2_SEL, "aud_apll2_sel",
+               aud_apll2_parents, 0x134, 1, 1),
+       MUX(CLK_TOP_DA_AUDULL_VTX_6P5M_SEL, "audull_vtx_sel",
+               audull_vtx_parents, 0x134, 31, 1),
+};
+
+static const char * const mcu_mp0_parents[] = {
+       "clk26m",
+       "armca35pll_ck",
+       "f_mp0_pll1_ck",
+       "f_mp0_pll2_ck"
+};
+
+static const char * const mcu_mp2_parents[] = {
+       "clk26m",
+       "armca72pll_ck",
+       "f_big_pll1_ck",
+       "f_big_pll2_ck"
+};
+
+static const char * const mcu_bus_parents[] = {
+       "clk26m",
+       "cci400_sel",
+       "f_bus_pll1_ck",
+       "f_bus_pll2_ck"
+};
+
+static struct mtk_composite mcu_muxes[] = {
+       /* mp0_pll_divider_cfg */
+       MUX_GATE_FLAGS(CLK_MCU_MP0_SEL, "mcu_mp0_sel", mcu_mp0_parents, 0x7A0,
+               9, 2, -1, CLK_IS_CRITICAL),
+       /* mp2_pll_divider_cfg */
+       MUX_GATE_FLAGS(CLK_MCU_MP2_SEL, "mcu_mp2_sel", mcu_mp2_parents, 0x7A8,
+               9, 2, -1, CLK_IS_CRITICAL),
+       /* bus_pll_divider_cfg */
+       MUX_GATE_FLAGS(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0,
+               9, 2, -1, CLK_IS_CRITICAL),
+};
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+       DIV_ADJ(CLK_TOP_APLL_DIV0, "apll_div0", "i2so1_sel", 0x124, 0, 8),
+       DIV_ADJ(CLK_TOP_APLL_DIV1, "apll_div1", "i2so2_sel", 0x124, 8, 8),
+       DIV_ADJ(CLK_TOP_APLL_DIV2, "apll_div2", "i2so3_sel", 0x124, 16, 8),
+       DIV_ADJ(CLK_TOP_APLL_DIV3, "apll_div3", "tdmo0_sel", 0x124, 24, 8),
+       DIV_ADJ(CLK_TOP_APLL_DIV4, "apll_div4", "tdmo1_sel", 0x128, 0, 8),
+       DIV_ADJ(CLK_TOP_APLL_DIV5, "apll_div5", "i2si1_sel", 0x128, 8, 8),
+       DIV_ADJ(CLK_TOP_APLL_DIV6, "apll_div6", "i2si2_sel", 0x128, 16, 8),
+       DIV_ADJ(CLK_TOP_APLL_DIV7, "apll_div7", "i2si3_sel", 0x128, 24, 8),
+};
+
+static const struct mtk_gate_regs top_cg_regs = {
+       .set_ofs = 0x120,
+       .clr_ofs = 0x120,
+       .sta_ofs = 0x120,
+};
+
+#define GATE_TOP(_id, _name, _parent, _shift) {        \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &top_cg_regs,                   \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,     \
+       }
+
+static const struct mtk_gate top_clks[] = {
+       GATE_TOP(CLK_TOP_APLL_DIV_PDN0, "apll_div_pdn0", "i2so1_sel", 0),
+       GATE_TOP(CLK_TOP_APLL_DIV_PDN1, "apll_div_pdn1", "i2so2_sel", 1),
+       GATE_TOP(CLK_TOP_APLL_DIV_PDN2, "apll_div_pdn2", "i2so3_sel", 2),
+       GATE_TOP(CLK_TOP_APLL_DIV_PDN3, "apll_div_pdn3", "tdmo0_sel", 3),
+       GATE_TOP(CLK_TOP_APLL_DIV_PDN4, "apll_div_pdn4", "tdmo1_sel", 4),
+       GATE_TOP(CLK_TOP_APLL_DIV_PDN5, "apll_div_pdn5", "i2si1_sel", 5),
+       GATE_TOP(CLK_TOP_APLL_DIV_PDN6, "apll_div_pdn6", "i2si2_sel", 6),
+       GATE_TOP(CLK_TOP_APLL_DIV_PDN7, "apll_div_pdn7", "i2si3_sel", 7),
+};
+
+static const struct mtk_gate_regs infra_cg_regs = {
+       .set_ofs = 0x40,
+       .clr_ofs = 0x44,
+       .sta_ofs = 0x40,
+};
+
+#define GATE_INFRA(_id, _name, _parent, _shift) {      \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &infra_cg_regs,                 \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr,        \
+       }
+
+static const struct mtk_gate infra_clks[] = {
+       GATE_INFRA(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+       GATE_INFRA(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
+       GATE_INFRA(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
+       GATE_INFRA(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
+       GATE_INFRA(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "spi_sel", 24),
+       GATE_INFRA(CLK_INFRA_AO_SPI1, "infra_ao_spi1", "spislv_sel", 25),
+       GATE_INFRA(CLK_INFRA_AO_UART5, "infra_ao_uart5", "axi_sel", 26),
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+       .set_ofs = 0x8,
+       .clr_ofs = 0x10,
+       .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+       .set_ofs = 0xc,
+       .clr_ofs = 0x14,
+       .sta_ofs = 0x1c,
+};
+
+static const struct mtk_gate_regs peri2_cg_regs = {
+       .set_ofs = 0x42c,
+       .clr_ofs = 0x42c,
+       .sta_ofs = 0x42c,
+};
+
+#define GATE_PERI0(_id, _name, _parent, _shift) {      \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &peri0_cg_regs,                 \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr,        \
+       }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) {      \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &peri1_cg_regs,                 \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_setclr,        \
+       }
+
+#define GATE_PERI2(_id, _name, _parent, _shift) {      \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &peri2_cg_regs,                 \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+       }
+
+static const struct mtk_gate peri_clks[] = {
+       /* PERI0 */
+       GATE_PERI0(CLK_PERI_NFI, "per_nfi",
+               "axi_sel", 0),
+       GATE_PERI0(CLK_PERI_THERM, "per_therm",
+               "axi_sel", 1),
+       GATE_PERI0(CLK_PERI_PWM0, "per_pwm0",
+               "pwm_sel", 2),
+       GATE_PERI0(CLK_PERI_PWM1, "per_pwm1",
+               "pwm_sel", 3),
+       GATE_PERI0(CLK_PERI_PWM2, "per_pwm2",
+               "pwm_sel", 4),
+       GATE_PERI0(CLK_PERI_PWM3, "per_pwm3",
+               "pwm_sel", 5),
+       GATE_PERI0(CLK_PERI_PWM4, "per_pwm4",
+               "pwm_sel", 6),
+       GATE_PERI0(CLK_PERI_PWM5, "per_pwm5",
+               "pwm_sel", 7),
+       GATE_PERI0(CLK_PERI_PWM6, "per_pwm6",
+               "pwm_sel", 8),
+       GATE_PERI0(CLK_PERI_PWM7, "per_pwm7",
+               "pwm_sel", 9),
+       GATE_PERI0(CLK_PERI_PWM, "per_pwm",
+               "pwm_sel", 10),
+       GATE_PERI0(CLK_PERI_AP_DMA, "per_ap_dma",
+               "axi_sel", 13),
+       GATE_PERI0(CLK_PERI_MSDC30_0, "per_msdc30_0",
+               "msdc50_0_sel", 14),
+       GATE_PERI0(CLK_PERI_MSDC30_1, "per_msdc30_1",
+               "msdc30_1_sel", 15),
+       GATE_PERI0(CLK_PERI_MSDC30_2, "per_msdc30_2",
+               "msdc30_2_sel", 16),
+       GATE_PERI0(CLK_PERI_MSDC30_3, "per_msdc30_3",
+               "msdc30_3_sel", 17),
+       GATE_PERI0(CLK_PERI_UART0, "per_uart0",
+               "uart_sel", 20),
+       GATE_PERI0(CLK_PERI_UART1, "per_uart1",
+               "uart_sel", 21),
+       GATE_PERI0(CLK_PERI_UART2, "per_uart2",
+               "uart_sel", 22),
+       GATE_PERI0(CLK_PERI_UART3, "per_uart3",
+               "uart_sel", 23),
+       GATE_PERI0(CLK_PERI_I2C0, "per_i2c0",
+               "axi_sel", 24),
+       GATE_PERI0(CLK_PERI_I2C1, "per_i2c1",
+               "axi_sel", 25),
+       GATE_PERI0(CLK_PERI_I2C2, "per_i2c2",
+               "axi_sel", 26),
+       GATE_PERI0(CLK_PERI_I2C3, "per_i2c3",
+               "axi_sel", 27),
+       GATE_PERI0(CLK_PERI_I2C4, "per_i2c4",
+               "axi_sel", 28),
+       GATE_PERI0(CLK_PERI_AUXADC, "per_auxadc",
+               "ltepll_fs26m", 29),
+       GATE_PERI0(CLK_PERI_SPI0, "per_spi0",
+               "spi_sel", 30),
+       /* PERI1 */
+       GATE_PERI1(CLK_PERI_SPI, "per_spi",
+               "spinor_sel", 1),
+       GATE_PERI1(CLK_PERI_I2C5, "per_i2c5",
+               "axi_sel", 3),
+       GATE_PERI1(CLK_PERI_SPI2, "per_spi2",
+               "spi_sel", 5),
+       GATE_PERI1(CLK_PERI_SPI3, "per_spi3",
+               "spi_sel", 6),
+       GATE_PERI1(CLK_PERI_SPI5, "per_spi5",
+               "spi_sel", 8),
+       GATE_PERI1(CLK_PERI_UART4, "per_uart4",
+               "uart_sel", 9),
+       GATE_PERI1(CLK_PERI_SFLASH, "per_sflash",
+               "uart_sel", 11),
+       GATE_PERI1(CLK_PERI_GMAC, "per_gmac",
+               "uart_sel", 12),
+       GATE_PERI1(CLK_PERI_PCIE0, "per_pcie0",
+               "uart_sel", 14),
+       GATE_PERI1(CLK_PERI_PCIE1, "per_pcie1",
+               "uart_sel", 15),
+       GATE_PERI1(CLK_PERI_GMAC_PCLK, "per_gmac_pclk",
+               "uart_sel", 16),
+       /* PERI2 */
+       GATE_PERI2(CLK_PERI_MSDC50_0_EN, "per_msdc50_0_en",
+               "msdc50_0_sel", 0),
+       GATE_PERI2(CLK_PERI_MSDC30_1_EN, "per_msdc30_1_en",
+               "msdc30_1_sel", 1),
+       GATE_PERI2(CLK_PERI_MSDC30_2_EN, "per_msdc30_2_en",
+               "msdc30_2_sel", 2),
+       GATE_PERI2(CLK_PERI_MSDC30_3_EN, "per_msdc30_3_en",
+               "msdc30_3_sel", 3),
+       GATE_PERI2(CLK_PERI_MSDC50_0_HCLK_EN, "per_msdc50_0_h",
+               "msdc50_0_h_sel", 4),
+       GATE_PERI2(CLK_PERI_MSDC50_3_HCLK_EN, "per_msdc50_3_h",
+               "msdc50_3_h_sel", 5),
+};
+
+#define MT2712_PLL_FMAX                (3000UL * MHZ)
+
+#define CON0_MT2712_RST_BAR    BIT(24)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,  \
+                       _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg,  \
+                       _tuner_en_bit, _pcw_reg, _pcw_shift,            \
+                       _div_table) {                                   \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .reg = _reg,                                            \
+               .pwr_reg = _pwr_reg,                                    \
+               .en_mask = _en_mask,                                    \
+               .flags = _flags,                                        \
+               .rst_bar_mask = CON0_MT2712_RST_BAR,                    \
+               .fmax = MT2712_PLL_FMAX,                                \
+               .pcwbits = _pcwbits,                                    \
+               .pd_reg = _pd_reg,                                      \
+               .pd_shift = _pd_shift,                                  \
+               .tuner_reg = _tuner_reg,                                \
+               .tuner_en_reg = _tuner_en_reg,                          \
+               .tuner_en_bit = _tuner_en_bit,                          \
+               .pcw_reg = _pcw_reg,                                    \
+               .pcw_shift = _pcw_shift,                                \
+               .div_table = _div_table,                                \
+       }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,    \
+                       _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg,  \
+                       _tuner_en_bit, _pcw_reg, _pcw_shift)            \
+               PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags,     \
+                       _pcwbits, _pd_reg, _pd_shift, _tuner_reg,       \
+                       _tuner_en_reg, _tuner_en_bit, _pcw_reg,         \
+                       _pcw_shift, NULL)
+
+static const struct mtk_pll_div_table armca35pll_div_table[] = {
+       { .div = 0, .freq = MT2712_PLL_FMAX },
+       { .div = 1, .freq = 1202500000 },
+       { .div = 2, .freq = 500500000 },
+       { .div = 3, .freq = 315250000 },
+       { .div = 4, .freq = 157625000 },
+       { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table armca72pll_div_table[] = {
+       { .div = 0, .freq = MT2712_PLL_FMAX },
+       { .div = 1, .freq = 994500000 },
+       { .div = 2, .freq = 520000000 },
+       { .div = 3, .freq = 315250000 },
+       { .div = 4, .freq = 157625000 },
+       { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+       { .div = 0, .freq = MT2712_PLL_FMAX },
+       { .div = 1, .freq = 1001000000 },
+       { .div = 2, .freq = 601250000 },
+       { .div = 3, .freq = 250250000 },
+       { .div = 4, .freq = 125125000 },
+       { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+       PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000101,
+               HAVE_RST_BAR, 31, 0x0230, 4, 0, 0, 0, 0x0234, 0),
+       PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000101,
+               HAVE_RST_BAR, 31, 0x0240, 4, 0, 0, 0, 0x0244, 0),
+       PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000101,
+               0, 31, 0x0320, 4, 0, 0, 0, 0x0324, 0),
+       PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000101,
+               0, 31, 0x0280, 4, 0, 0, 0, 0x0284, 0),
+       PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000101,
+               0, 31, 0x0330, 4, 0x0338, 0x0014, 0, 0x0334, 0),
+       PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000101,
+               0, 31, 0x0350, 4, 0x0358, 0x0014, 1, 0x0354, 0),
+       PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000101,
+               0, 31, 0x0370, 4, 0, 0, 0, 0x0374, 0),
+       PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000101,
+               0, 31, 0x0390, 4, 0, 0, 0, 0x0394, 0),
+       PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000101,
+               0, 31, 0x0270, 4, 0, 0, 0, 0x0274, 0),
+       PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000101,
+               0, 31, 0x0410, 4, 0, 0, 0, 0x0414, 0),
+       PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000101,
+               0, 31, 0x0290, 4, 0, 0, 0, 0x0294, 0),
+       PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000101,
+               0, 31, 0x0250, 4, 0, 0, 0, 0x0254, 0,
+               mmpll_div_table),
+       PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000101,
+               HAVE_RST_BAR, 31, 0x0100, 4, 0, 0, 0, 0x0104, 0,
+               armca35pll_div_table),
+       PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000101,
+               0, 31, 0x0210, 4, 0, 0, 0, 0x0214, 0,
+               armca72pll_div_table),
+       PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000101,
+               0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
+};
+
+static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+       mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt2712_top_init_early(struct device_node *node)
+{
+       int r, i;
+
+       if (!top_clk_data) {
+               top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+               for (i = 0; i < CLK_TOP_NR_CLK; i++)
+                       top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+       }
+
+       mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+                       top_clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+       if (r)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+}
+
+CLK_OF_DECLARE_DRIVER(mt2712_topckgen, "mediatek,mt2712-topckgen",
+                       clk_mt2712_top_init_early);
+
+static int clk_mt2712_top_probe(struct platform_device *pdev)
+{
+       int r, i;
+       struct device_node *node = pdev->dev.of_node;
+       void __iomem *base;
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base)) {
+               pr_err("%s(): ioremap failed\n", __func__);
+               return PTR_ERR(base);
+       }
+
+       if (!top_clk_data) {
+               top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+       } else {
+               for (i = 0; i < CLK_TOP_NR_CLK; i++) {
+                       if (top_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
+                               top_clk_data->clks[i] = ERR_PTR(-ENOENT);
+               }
+       }
+
+       mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+                       top_clk_data);
+       mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+                       top_clk_data);
+       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+       mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+                       &mt2712_clk_lock, top_clk_data);
+       mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+                       &mt2712_clk_lock, top_clk_data);
+       mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+                       top_clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static int clk_mt2712_infra_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+       mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       mtk_register_reset_controller(node, 2, 0x30);
+
+       return r;
+}
+
+static int clk_mt2712_peri_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+       mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+                       clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       mtk_register_reset_controller(node, 2, 0);
+
+       return r;
+}
+
+static int clk_mt2712_mcu_probe(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+       void __iomem *base;
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base)) {
+               pr_err("%s(): ioremap failed\n", __func__);
+               return PTR_ERR(base);
+       }
+
+       clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+
+       mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+                       &mt2712_clk_lock, clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+       if (r != 0)
+               pr_err("%s(): could not register clock provider: %d\n",
+                       __func__, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712[] = {
+       {
+               .compatible = "mediatek,mt2712-apmixedsys",
+               .data = clk_mt2712_apmixed_probe,
+       }, {
+               .compatible = "mediatek,mt2712-topckgen",
+               .data = clk_mt2712_top_probe,
+       }, {
+               .compatible = "mediatek,mt2712-infracfg",
+               .data = clk_mt2712_infra_probe,
+       }, {
+               .compatible = "mediatek,mt2712-pericfg",
+               .data = clk_mt2712_peri_probe,
+       }, {
+               .compatible = "mediatek,mt2712-mcucfg",
+               .data = clk_mt2712_mcu_probe,
+       }, {
+               /* sentinel */
+       }
+};
+
+static int clk_mt2712_probe(struct platform_device *pdev)
+{
+       int (*clk_probe)(struct platform_device *);
+       int r;
+
+       clk_probe = of_device_get_match_data(&pdev->dev);
+       if (!clk_probe)
+               return -EINVAL;
+
+       r = clk_probe(pdev);
+       if (r != 0)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       return r;
+}
+
+static struct platform_driver clk_mt2712_drv = {
+       .probe = clk_mt2712_probe,
+       .driver = {
+               .name = "clk-mt2712",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_clk_mt2712,
+       },
+};
+
+static int __init clk_mt2712_init(void)
+{
+       return platform_driver_register(&clk_mt2712_drv);
+}
+
+arch_initcall(clk_mt2712_init);
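An illustrative aside, not part of the patch: because the topckgen composites above are registered as ordinary muxes and gates in the common clock framework, platform or consumer code can steer them with the generic clk API. The device pointer and the "vdec_sel"/"vcodecpll_ck" connection names below are assumptions for the sketch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper: move the video decoder off the 26 MHz bypass. */
static int mt2712_select_vdec_parent(struct device *dev)
{
	struct clk *mux, *parent;
	int ret;

	mux = devm_clk_get(dev, "vdec_sel");		/* assumed clock-names entries */
	parent = devm_clk_get(dev, "vcodecpll_ck");
	if (IS_ERR(mux) || IS_ERR(parent))
		return -ENODEV;

	ret = clk_set_parent(mux, parent);	/* updates the CLK_CFG_1 mux field (0x050, bits 8..11) */
	if (ret)
		return ret;

	return clk_prepare_enable(mux);		/* clears the power-down bit 15 of CLK_CFG_1 */
}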
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
new file mode 100644
index 0000000..fad7d9f
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ *        Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) {     \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &audio0_cg_regs,                        \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,     \
+       }
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) {     \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &audio1_cg_regs,                        \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,     \
+       }
+
+#define GATE_AUDIO2(_id, _name, _parent, _shift) {     \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &audio2_cg_regs,                        \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,     \
+       }
+
+#define GATE_AUDIO3(_id, _name, _parent, _shift) {     \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &audio3_cg_regs,                        \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,     \
+       }
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+       .set_ofs = 0x0,
+       .clr_ofs = 0x0,
+       .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+       .set_ofs = 0x10,
+       .clr_ofs = 0x10,
+       .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs audio2_cg_regs = {
+       .set_ofs = 0x14,
+       .clr_ofs = 0x14,
+       .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs audio3_cg_regs = {
+       .set_ofs = 0x634,
+       .clr_ofs = 0x634,
+       .sta_ofs = 0x634,
+};
+
+static const struct mtk_gate audio_clks[] = {
+       /* AUDIO0 */
+       GATE_AUDIO0(CLK_AUDIO_AFE, "audio_afe", "rtc", 2),
+       GATE_AUDIO0(CLK_AUDIO_HDMI, "audio_hdmi", "apll1_ck_sel", 20),
+       GATE_AUDIO0(CLK_AUDIO_SPDF, "audio_spdf", "apll1_ck_sel", 21),
+       GATE_AUDIO0(CLK_AUDIO_APLL, "audio_apll", "apll1_ck_sel", 23),
+       /* AUDIO1 */
+       GATE_AUDIO1(CLK_AUDIO_I2SIN1, "audio_i2sin1", "a1sys_hp_sel", 0),
+       GATE_AUDIO1(CLK_AUDIO_I2SIN2, "audio_i2sin2", "a1sys_hp_sel", 1),
+       GATE_AUDIO1(CLK_AUDIO_I2SIN3, "audio_i2sin3", "a1sys_hp_sel", 2),
+       GATE_AUDIO1(CLK_AUDIO_I2SIN4, "audio_i2sin4", "a1sys_hp_sel", 3),
+       GATE_AUDIO1(CLK_AUDIO_I2SO1, "audio_i2so1", "a1sys_hp_sel", 6),
+       GATE_AUDIO1(CLK_AUDIO_I2SO2, "audio_i2so2", "a1sys_hp_sel", 7),
+       GATE_AUDIO1(CLK_AUDIO_I2SO3, "audio_i2so3", "a1sys_hp_sel", 8),
+       GATE_AUDIO1(CLK_AUDIO_I2SO4, "audio_i2so4", "a1sys_hp_sel", 9),
+       GATE_AUDIO1(CLK_AUDIO_ASRCI1, "audio_asrci1", "asm_h_sel", 12),
+       GATE_AUDIO1(CLK_AUDIO_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
+       GATE_AUDIO1(CLK_AUDIO_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
+       GATE_AUDIO1(CLK_AUDIO_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+       GATE_AUDIO1(CLK_AUDIO_INTDIR, "audio_intdir", "intdir_sel", 20),
+       GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
+       GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
+       /* AUDIO2 */
+       GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
+       GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
+       GATE_AUDIO2(CLK_AUDIO_UL3, "audio_ul3", "a1sys_hp_sel", 2),
+       GATE_AUDIO2(CLK_AUDIO_UL4, "audio_ul4", "a1sys_hp_sel", 3),
+       GATE_AUDIO2(CLK_AUDIO_UL5, "audio_ul5", "a1sys_hp_sel", 4),
+       GATE_AUDIO2(CLK_AUDIO_UL6, "audio_ul6", "a1sys_hp_sel", 5),
+       GATE_AUDIO2(CLK_AUDIO_DL1, "audio_dl1", "a1sys_hp_sel", 6),
+       GATE_AUDIO2(CLK_AUDIO_DL2, "audio_dl2", "a1sys_hp_sel", 7),
+       GATE_AUDIO2(CLK_AUDIO_DL3, "audio_dl3", "a1sys_hp_sel", 8),
+       GATE_AUDIO2(CLK_AUDIO_DL4, "audio_dl4", "a1sys_hp_sel", 9),
+       GATE_AUDIO2(CLK_AUDIO_DL5, "audio_dl5", "a1sys_hp_sel", 10),
+       GATE_AUDIO2(CLK_AUDIO_DL6, "audio_dl6", "a1sys_hp_sel", 11),
+       GATE_AUDIO2(CLK_AUDIO_DLMCH, "audio_dlmch", "a1sys_hp_sel", 12),
+       GATE_AUDIO2(CLK_AUDIO_ARB1, "audio_arb1", "a1sys_hp_sel", 13),
+       GATE_AUDIO2(CLK_AUDIO_AWB, "audio_awb", "a1sys_hp_sel", 14),
+       GATE_AUDIO2(CLK_AUDIO_AWB2, "audio_awb2", "a1sys_hp_sel", 15),
+       GATE_AUDIO2(CLK_AUDIO_DAI, "audio_dai", "a1sys_hp_sel", 16),
+       GATE_AUDIO2(CLK_AUDIO_MOD, "audio_mod", "a1sys_hp_sel", 17),
+       /* AUDIO3 */
+       GATE_AUDIO3(CLK_AUDIO_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
+       GATE_AUDIO3(CLK_AUDIO_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
+       GATE_AUDIO3(CLK_AUDIO_ASRCO3, "audio_asrco3", "asm_h_sel", 6),
+       GATE_AUDIO3(CLK_AUDIO_ASRCO4, "audio_asrco4", "asm_h_sel", 7),
+       GATE_AUDIO3(CLK_AUDIO_MEM_ASRC1, "audio_mem_asrc1", "asm_h_sel", 10),
+       GATE_AUDIO3(CLK_AUDIO_MEM_ASRC2, "audio_mem_asrc2", "asm_h_sel", 11),
+       GATE_AUDIO3(CLK_AUDIO_MEM_ASRC3, "audio_mem_asrc3", "asm_h_sel", 12),
+       GATE_AUDIO3(CLK_AUDIO_MEM_ASRC4, "audio_mem_asrc4", "asm_h_sel", 13),
+       GATE_AUDIO3(CLK_AUDIO_MEM_ASRC5, "audio_mem_asrc5", "asm_h_sel", 14),
+};
+
+static int clk_mt7622_audiosys_init(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       struct device_node *node = pdev->dev.of_node;
+       int r;
+
+       clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+
+       mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+                              clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt7622_aud[] = {
+       {
+               .compatible = "mediatek,mt7622-audsys",
+               .data = clk_mt7622_audiosys_init,
+       }, {
+               /* sentinel */
+       }
+};
+
+static int clk_mt7622_aud_probe(struct platform_device *pdev)
+{
+       int (*clk_init)(struct platform_device *);
+       int r;
+
+       clk_init = of_device_get_match_data(&pdev->dev);
+       if (!clk_init)
+               return -EINVAL;
+
+       r = clk_init(pdev);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       return r;
+}
+
+static struct platform_driver clk_mt7622_aud_drv = {
+       .probe = clk_mt7622_aud_probe,
+       .driver = {
+               .name = "clk-mt7622-aud",
+               .of_match_table = of_match_clk_mt7622_aud,
+       },
+};
+
+builtin_platform_driver(clk_mt7622_aud_drv);
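An illustrative aside, not part of the patch: the AUDIO0..AUDIO3 gates registered above are typically claimed in groups by the audio front-end, which the bulk clk helpers express concisely. The "afe"/"a1sys"/"a2sys" names are assumed clock-names entries for the sketch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical consumer-side clock list for an audio front-end driver. */
static struct clk_bulk_data afe_clks[] = {
	{ .id = "afe" },
	{ .id = "a1sys" },
	{ .id = "a2sys" },
};

static int mt7622_afe_enable_clks(struct device *dev)
{
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(afe_clks), afe_clks);
	if (ret)
		return ret;

	/* Ungates e.g. CLK_AUDIO_AFE, CLK_AUDIO_A1SYS and CLK_AUDIO_A2SYS. */
	ret = clk_bulk_prepare(ARRAY_SIZE(afe_clks), afe_clks);
	if (ret)
		return ret;

	return clk_bulk_enable(ARRAY_SIZE(afe_clks), afe_clks);
}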
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
new file mode 100644
index 0000000..6328127
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ *        Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_ETH(_id, _name, _parent, _shift) {        \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &eth_cg_regs,                   \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+       }
+
+static const struct mtk_gate_regs eth_cg_regs = {
+       .set_ofs = 0x30,
+       .clr_ofs = 0x30,
+       .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate eth_clks[] = {
+       GATE_ETH(CLK_ETH_HSDMA_EN, "eth_hsdma_en", "eth_sel", 5),
+       GATE_ETH(CLK_ETH_ESW_EN, "eth_esw_en", "eth_500m", 6),
+       GATE_ETH(CLK_ETH_GP2_EN, "eth_gp2_en", "txclk_src_pre", 7),
+       GATE_ETH(CLK_ETH_GP1_EN, "eth_gp1_en", "txclk_src_pre", 8),
+       GATE_ETH(CLK_ETH_GP0_EN, "eth_gp0_en", "txclk_src_pre", 9),
+};
+
+static const struct mtk_gate_regs sgmii_cg_regs = {
+       .set_ofs = 0xE4,
+       .clr_ofs = 0xE4,
+       .sta_ofs = 0xE4,
+};
+
+#define GATE_SGMII(_id, _name, _parent, _shift) {      \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &sgmii_cg_regs,                 \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+       }
+
+static const struct mtk_gate sgmii_clks[] = {
+       GATE_SGMII(CLK_SGMII_TX250M_EN, "sgmii_tx250m_en",
+                  "ssusb_tx250m", 2),
+       GATE_SGMII(CLK_SGMII_RX250M_EN, "sgmii_rx250m_en",
+                  "ssusb_eq_rx250m", 3),
+       GATE_SGMII(CLK_SGMII_CDR_REF, "sgmii_cdr_ref",
+                  "ssusb_cdr_ref", 4),
+       GATE_SGMII(CLK_SGMII_CDR_FB, "sgmii_cdr_fb",
+                  "ssusb_cdr_fb", 5),
+};
+
+static int clk_mt7622_ethsys_init(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       struct device_node *node = pdev->dev.of_node;
+       int r;
+
+       clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
+
+       mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
+                              clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       mtk_register_reset_controller(node, 1, 0x34);
+
+       return r;
+}
+
+static int clk_mt7622_sgmiisys_init(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       struct device_node *node = pdev->dev.of_node;
+       int r;
+
+       clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
+
+       mtk_clk_register_gates(node, sgmii_clks, ARRAY_SIZE(sgmii_clks),
+                              clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt7622_eth[] = {
+       {
+               .compatible = "mediatek,mt7622-ethsys",
+               .data = clk_mt7622_ethsys_init,
+       }, {
+               .compatible = "mediatek,mt7622-sgmiisys",
+               .data = clk_mt7622_sgmiisys_init,
+       }, {
+               /* sentinel */
+       }
+};
+
+static int clk_mt7622_eth_probe(struct platform_device *pdev)
+{
+       int (*clk_init)(struct platform_device *);
+       int r;
+
+       clk_init = of_device_get_match_data(&pdev->dev);
+       if (!clk_init)
+               return -EINVAL;
+
+       r = clk_init(pdev);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       return r;
+}
+
+static struct platform_driver clk_mt7622_eth_drv = {
+       .probe = clk_mt7622_eth_probe,
+       .driver = {
+               .name = "clk-mt7622-eth",
+               .of_match_table = of_match_clk_mt7622_eth,
+       },
+};
+
+builtin_platform_driver(clk_mt7622_eth_drv);
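
Note: besides the gate clocks, clk_mt7622_ethsys_init() also registers a reset controller against register offset 0x34 of the same syscon. A hypothetical peripheral driver sitting under ethsys would then go through the generic reset API; a minimal sketch, assuming an illustrative "fe" reset line name that this commit does not define:

    struct reset_control *rst;
    int ret;

    rst = devm_reset_control_get_exclusive(&pdev->dev, "fe");
    if (IS_ERR(rst))
            return PTR_ERR(rst);

    /* pulse the reset line exposed by the ethsys reset controller */
    ret = reset_control_reset(rst);
    if (ret)
            return ret;
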
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
new file mode 100644 (file)
index 0000000..a6e8534
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ *        Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_PCIE(_id, _name, _parent, _shift) {       \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &pcie_cg_regs,                  \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+       }
+
+#define GATE_SSUSB(_id, _name, _parent, _shift) {      \
+               .id = _id,                              \
+               .name = _name,                          \
+               .parent_name = _parent,                 \
+               .regs = &ssusb_cg_regs,                 \
+               .shift = _shift,                        \
+               .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+       }
+
+static const struct mtk_gate_regs pcie_cg_regs = {
+       .set_ofs = 0x30,
+       .clr_ofs = 0x30,
+       .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate_regs ssusb_cg_regs = {
+       .set_ofs = 0x30,
+       .clr_ofs = 0x30,
+       .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate ssusb_clks[] = {
+       GATE_SSUSB(CLK_SSUSB_U2_PHY_1P_EN, "ssusb_u2_phy_1p",
+                  "to_u2_phy_1p", 0),
+       GATE_SSUSB(CLK_SSUSB_U2_PHY_EN, "ssusb_u2_phy_en", "to_u2_phy", 1),
+       GATE_SSUSB(CLK_SSUSB_REF_EN, "ssusb_ref_en", "to_usb3_ref", 5),
+       GATE_SSUSB(CLK_SSUSB_SYS_EN, "ssusb_sys_en", "to_usb3_sys", 6),
+       GATE_SSUSB(CLK_SSUSB_MCU_EN, "ssusb_mcu_en", "axi_sel", 7),
+       GATE_SSUSB(CLK_SSUSB_DMA_EN, "ssusb_dma_en", "hif_sel", 8),
+};
+
+static const struct mtk_gate pcie_clks[] = {
+       GATE_PCIE(CLK_PCIE_P1_AUX_EN, "pcie_p1_aux_en", "p1_1mhz", 12),
+       GATE_PCIE(CLK_PCIE_P1_OBFF_EN, "pcie_p1_obff_en", "free_run_4mhz", 13),
+       GATE_PCIE(CLK_PCIE_P1_AHB_EN, "pcie_p1_ahb_en", "axi_sel", 14),
+       GATE_PCIE(CLK_PCIE_P1_AXI_EN, "pcie_p1_axi_en", "hif_sel", 15),
+       GATE_PCIE(CLK_PCIE_P1_MAC_EN, "pcie_p1_mac_en", "pcie1_mac_en", 16),
+       GATE_PCIE(CLK_PCIE_P1_PIPE_EN, "pcie_p1_pipe_en", "pcie1_pipe_en", 17),
+       GATE_PCIE(CLK_PCIE_P0_AUX_EN, "pcie_p0_aux_en", "p0_1mhz", 18),
+       GATE_PCIE(CLK_PCIE_P0_OBFF_EN, "pcie_p0_obff_en", "free_run_4mhz", 19),
+       GATE_PCIE(CLK_PCIE_P0_AHB_EN, "pcie_p0_ahb_en", "axi_sel", 20),
+       GATE_PCIE(CLK_PCIE_P0_AXI_EN, "pcie_p0_axi_en", "hif_sel", 21),
+       GATE_PCIE(CLK_PCIE_P0_MAC_EN, "pcie_p0_mac_en", "pcie0_mac_en", 22),
+       GATE_PCIE(CLK_PCIE_P0_PIPE_EN, "pcie_p0_pipe_en", "pcie0_pipe_en", 23),
+       GATE_PCIE(CLK_SATA_AHB_EN, "sata_ahb_en", "axi_sel", 26),
+       GATE_PCIE(CLK_SATA_AXI_EN, "sata_axi_en", "hif_sel", 27),
+       GATE_PCIE(CLK_SATA_ASIC_EN, "sata_asic_en", "sata_asic", 28),
+       GATE_PCIE(CLK_SATA_RBC_EN, "sata_rbc_en", "sata_rbc", 29),
+       GATE_PCIE(CLK_SATA_PM_EN, "sata_pm_en", "univpll2_d4", 30),
+};
+
+static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       struct device_node *node = pdev->dev.of_node;
+       int r;
+
+       clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
+
+       mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
+                              clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       mtk_register_reset_controller(node, 1, 0x34);
+
+       return r;
+}
+
+static int clk_mt7622_pciesys_init(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       struct device_node *node = pdev->dev.of_node;
+       int r;
+
+       clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
+
+       mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
+                              clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       mtk_register_reset_controller(node, 1, 0x34);
+
+       return r;
+}
+
+static const struct of_device_id of_match_clk_mt7622_hif[] = {
+       {
+               .compatible = "mediatek,mt7622-pciesys",
+               .data = clk_mt7622_pciesys_init,
+       }, {
+               .compatible = "mediatek,mt7622-ssusbsys",
+               .data = clk_mt7622_ssusbsys_init,
+       }, {
+               /* sentinel */
+       }
+};
+
+static int clk_mt7622_hif_probe(struct platform_device *pdev)
+{
+       int (*clk_init)(struct platform_device *);
+       int r;
+
+       clk_init = of_device_get_match_data(&pdev->dev);
+       if (!clk_init)
+               return -EINVAL;
+
+       r = clk_init(pdev);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       return r;
+}
+
+static struct platform_driver clk_mt7622_hif_drv = {
+       .probe = clk_mt7622_hif_probe,
+       .driver = {
+               .name = "clk-mt7622-hif",
+               .of_match_table = of_match_clk_mt7622_hif,
+       },
+};
+
+builtin_platform_driver(clk_mt7622_hif_drv);
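
Note: clk-mt7622-aud, -eth and -hif all share the same probe shape: the .data pointer in the of_device_id table carries the per-subsystem init callback, so a single platform driver serves several syscon nodes. Once a provider has probed, a peripheral driver below it picks up its gates through the ordinary clk consumer API; a hypothetical sketch (the "sys" clock-names entry is illustrative, not something this commit defines):

    struct clk *sys_clk;
    int ret;

    sys_clk = devm_clk_get(&pdev->dev, "sys");
    if (IS_ERR(sys_clk))
            return PTR_ERR(sys_clk);

    ret = clk_prepare_enable(sys_clk);
    if (ret)
            return ret;
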
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
new file mode 100644 (file)
index 0000000..92f7e32
--- /dev/null
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ *        Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-cpumux.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+#include <linux/clk.h> /* for consumer */
+
+#define MT7622_PLL_FMAX                (2500UL * MHZ)
+#define CON0_MT7622_RST_BAR    BIT(27)
+
+#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
+                       _pd_reg, _pd_shift, _tuner_reg, _pcw_reg,       \
+                       _pcw_shift, _div_table, _parent_name) {         \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .reg = _reg,                                            \
+               .pwr_reg = _pwr_reg,                                    \
+               .en_mask = _en_mask,                                    \
+               .flags = _flags,                                        \
+               .rst_bar_mask = CON0_MT7622_RST_BAR,                    \
+               .fmax = MT7622_PLL_FMAX,                                \
+               .pcwbits = _pcwbits,                                    \
+               .pd_reg = _pd_reg,                                      \
+               .pd_shift = _pd_shift,                                  \
+               .tuner_reg = _tuner_reg,                                \
+               .pcw_reg = _pcw_reg,                                    \
+               .pcw_shift = _pcw_shift,                                \
+               .div_table = _div_table,                                \
+               .parent_name = _parent_name,                            \
+       }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,    \
+                       _pd_reg, _pd_shift, _tuner_reg, _pcw_reg,       \
+                       _pcw_shift)                                     \
+       PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
+                _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift,  \
+                NULL, "clkxtal")
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) {                    \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .parent_name = _parent,                                 \
+               .regs = &apmixed_cg_regs,                               \
+               .shift = _shift,                                        \
+               .ops = &mtk_clk_gate_ops_no_setclr_inv,                 \
+       }
+
+#define GATE_INFRA(_id, _name, _parent, _shift) {                      \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .parent_name = _parent,                                 \
+               .regs = &infra_cg_regs,                                 \
+               .shift = _shift,                                        \
+               .ops = &mtk_clk_gate_ops_setclr,                        \
+       }
+
+#define GATE_TOP0(_id, _name, _parent, _shift) {                       \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .parent_name = _parent,                                 \
+               .regs = &top0_cg_regs,                                  \
+               .shift = _shift,                                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,                     \
+       }
+
+#define GATE_TOP1(_id, _name, _parent, _shift) {                       \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .parent_name = _parent,                                 \
+               .regs = &top1_cg_regs,                                  \
+               .shift = _shift,                                        \
+               .ops = &mtk_clk_gate_ops_no_setclr,                     \
+       }
+
+#define GATE_PERI0(_id, _name, _parent, _shift) {                      \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .parent_name = _parent,                                 \
+               .regs = &peri0_cg_regs,                                 \
+               .shift = _shift,                                        \
+               .ops = &mtk_clk_gate_ops_setclr,                        \
+       }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) {                      \
+               .id = _id,                                              \
+               .name = _name,                                          \
+               .parent_name = _parent,                                 \
+               .regs = &peri1_cg_regs,                                 \
+               .shift = _shift,                                        \
+               .ops = &mtk_clk_gate_ops_setclr,                        \
+       }
+
+static DEFINE_SPINLOCK(mt7622_clk_lock);
+
+static const char * const infra_mux1_parents[] = {
+       "clkxtal",
+       "armpll",
+       "main_core_en",
+       "armpll"
+};
+
+static const char * const axi_parents[] = {
+       "clkxtal",
+       "syspll1_d2",
+       "syspll_d5",
+       "syspll1_d4",
+       "univpll_d5",
+       "univpll2_d2",
+       "univpll_d7"
+};
+
+static const char * const mem_parents[] = {
+       "clkxtal",
+       "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+       "clkxtal",
+       "syspll1_d8"
+};
+
+static const char * const eth_parents[] = {
+       "clkxtal",
+       "syspll1_d2",
+       "univpll1_d2",
+       "syspll1_d4",
+       "univpll_d5",
+       "clk_null",
+       "univpll_d7"
+};
+
+static const char * const pwm_parents[] = {
+       "clkxtal",
+       "univpll2_d4"
+};
+
+static const char * const f10m_ref_parents[] = {
+       "clkxtal",
+       "syspll4_d16"
+};
+
+static const char * const nfi_infra_parents[] = {
+       "clkxtal",
+       "clkxtal",
+       "clkxtal",
+       "clkxtal",
+       "clkxtal",
+       "clkxtal",
+       "clkxtal",
+       "clkxtal",
+       "univpll2_d8",
+       "syspll1_d8",
+       "univpll1_d8",
+       "syspll4_d2",
+       "univpll2_d4",
+       "univpll3_d2",
+       "syspll1_d4"
+};
+
+static const char * const flash_parents[] = {
+       "clkxtal",
+       "univpll_d80_d4",
+       "syspll2_d8",
+       "syspll3_d4",
+       "univpll3_d4",
+       "univpll1_d8",
+       "syspll2_d4",
+       "univpll2_d4"
+};
+
+static const char * const uart_parents[] = {
+       "clkxtal",
+       "univpll2_d8"
+};
+
+static const char * const spi0_parents[] = {
+       "clkxtal",
+       "syspll3_d2",
+       "clkxtal",
+       "syspll2_d4",
+       "syspll4_d2",
+       "univpll2_d4",
+       "univpll1_d8",
+       "clkxtal"
+};
+
+static const char * const spi1_parents[] = {
+       "clkxtal",
+       "syspll3_d2",
+       "clkxtal",
+       "syspll4_d4",
+       "syspll4_d2",
+       "univpll2_d4",
+       "univpll1_d8",
+       "clkxtal"
+};
+
+static const char * const msdc30_0_parents[] = {
+       "clkxtal",
+       "univpll2_d16",
+       "univ48m"
+};
+
+static const char * const a1sys_hp_parents[] = {
+       "clkxtal",
+       "aud1pll_ck",
+       "aud2pll_ck",
+       "clkxtal"
+};
+
+static const char * const intdir_parents[] = {
+       "clkxtal",
+       "syspll_d2",
+       "univpll_d2",
+       "sgmiipll_ck"
+};
+
+static const char * const aud_intbus_parents[] = {
+       "clkxtal",
+       "syspll1_d4",
+       "syspll4_d2",
+       "syspll3_d2"
+};
+
+static const char * const pmicspi_parents[] = {
+       "clkxtal",
+       "clk_null",
+       "clk_null",
+       "clk_null",
+       "clk_null",
+       "univpll2_d16"
+};
+
+static const char * const atb_parents[] = {
+       "clkxtal",
+       "syspll1_d2",
+       "syspll_d5"
+};
+
+static const char * const audio_parents[] = {
+       "clkxtal",
+       "syspll3_d4",
+       "syspll4_d4",
+       "univpll1_d16"
+};
+
+static const char * const usb20_parents[] = {
+       "clkxtal",
+       "univpll3_d4",
+       "syspll1_d8",
+       "clkxtal"
+};
+
+static const char * const aud1_parents[] = {
+       "clkxtal",
+       "aud1pll_ck"
+};
+
+static const char * const aud2_parents[] = {
+       "clkxtal",
+       "aud2pll_ck"
+};
+
+static const char * const asm_l_parents[] = {
+       "clkxtal",
+       "syspll_d5",
+       "univpll2_d2",
+       "univpll2_d4"
+};
+
+static const char * const apll1_ck_parents[] = {
+       "aud1_sel",
+       "aud2_sel"
+};
+
+static const char * const peribus_ck_parents[] = {
+       "syspll1_d8",
+       "syspll1_d4"
+};
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+       .set_ofs = 0x8,
+       .clr_ofs = 0x8,
+       .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs infra_cg_regs = {
+       .set_ofs = 0x40,
+       .clr_ofs = 0x44,
+       .sta_ofs = 0x48,
+};
+
+static const struct mtk_gate_regs top0_cg_regs = {
+       .set_ofs = 0x120,
+       .clr_ofs = 0x120,
+       .sta_ofs = 0x120,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+       .set_ofs = 0x128,
+       .clr_ofs = 0x128,
+       .sta_ofs = 0x128,
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+       .set_ofs = 0x8,
+       .clr_ofs = 0x10,
+       .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+       .set_ofs = 0xC,
+       .clr_ofs = 0x14,
+       .sta_ofs = 0x1C,
+};
+
+static const struct mtk_pll_data plls[] = {
+       PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001,
+           PLL_AO, 21, 0x0204, 24, 0, 0x0204, 0),
+       PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0x00000001,
+           HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0),
+       PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0x00000001,
+           HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14),
+       PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0x00000001,
+           0, 21, 0x0300, 1, 0, 0x0304, 0),
+       PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0x00000001,
+           0, 21, 0x0314, 1, 0, 0x0318, 0),
+       PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0x00000001,
+           0, 31, 0x0324, 1, 0, 0x0328, 0),
+       PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0x00000001,
+           0, 31, 0x0334, 1, 0, 0x0338, 0),
+       PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0x00000001,
+           0, 21, 0x0344, 1, 0, 0x0348, 0),
+       PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0x00000001,
+           0, 21, 0x0358, 1, 0, 0x035C, 0),
+};
+
+static const struct mtk_gate apmixed_clks[] = {
+       GATE_APMIXED(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
+};
+
+static const struct mtk_gate infra_clks[] = {
+       GATE_INFRA(CLK_INFRA_DBGCLK_PD, "infra_dbgclk_pd", "axi_sel", 0),
+       GATE_INFRA(CLK_INFRA_TRNG, "trng_ck", "axi_sel", 2),
+       GATE_INFRA(CLK_INFRA_AUDIO_PD, "infra_audio_pd", "aud_intbus_sel", 5),
+       GATE_INFRA(CLK_INFRA_IRRX_PD, "infra_irrx_pd", "irrx_sel", 16),
+       GATE_INFRA(CLK_INFRA_APXGPT_PD, "infra_apxgpt_pd", "f10m_ref_sel", 18),
+       GATE_INFRA(CLK_INFRA_PMIC_PD, "infra_pmic_pd", "pmicspi_sel", 22),
+};
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+       FIXED_CLK(CLK_TOP_TO_U2_PHY, "to_u2_phy", "clkxtal",
+                 31250000),
+       FIXED_CLK(CLK_TOP_TO_U2_PHY_1P, "to_u2_phy_1p", "clkxtal",
+                 31250000),
+       FIXED_CLK(CLK_TOP_PCIE0_PIPE_EN, "pcie0_pipe_en", "clkxtal",
+                 125000000),
+       FIXED_CLK(CLK_TOP_PCIE1_PIPE_EN, "pcie1_pipe_en", "clkxtal",
+                 125000000),
+       FIXED_CLK(CLK_TOP_SSUSB_TX250M, "ssusb_tx250m", "clkxtal",
+                 250000000),
+       FIXED_CLK(CLK_TOP_SSUSB_EQ_RX250M, "ssusb_eq_rx250m", "clkxtal",
+                 250000000),
+       FIXED_CLK(CLK_TOP_SSUSB_CDR_REF, "ssusb_cdr_ref", "clkxtal",
+                 33333333),
+       FIXED_CLK(CLK_TOP_SSUSB_CDR_FB, "ssusb_cdr_fb", "clkxtal",
+                 50000000),
+       FIXED_CLK(CLK_TOP_SATA_ASIC, "sata_asic", "clkxtal",
+                 50000000),
+       FIXED_CLK(CLK_TOP_SATA_RBC, "sata_rbc", "clkxtal",
+                 50000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+       FACTOR(CLK_TOP_TO_USB3_SYS, "to_usb3_sys", "eth1pll", 1, 4),
+       FACTOR(CLK_TOP_P1_1MHZ, "p1_1mhz", "eth1pll", 1, 500),
+       FACTOR(CLK_TOP_4MHZ, "free_run_4mhz", "eth1pll", 1, 125),
+       FACTOR(CLK_TOP_P0_1MHZ, "p0_1mhz", "eth1pll", 1, 500),
+       FACTOR(CLK_TOP_TXCLK_SRC_PRE, "txclk_src_pre", "sgmiipll_d2", 1, 1),
+       FACTOR(CLK_TOP_RTC, "rtc", "clkxtal", 1, 1024),
+       FACTOR(CLK_TOP_MEMPLL, "mempll", "clkxtal", 32, 1),
+       FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+       FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+       FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 4),
+       FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 8),
+       FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 16),
+       FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 12),
+       FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "mainpll", 1, 24),
+       FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+       FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 10),
+       FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 20),
+       FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 14),
+       FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 28),
+       FACTOR(CLK_TOP_SYSPLL4_D16, "syspll4_d16", "mainpll", 1, 112),
+       FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+       FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 4),
+       FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 8),
+       FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll", 1, 16),
+       FACTOR(CLK_TOP_UNIVPLL1_D16, "univpll1_d16", "univpll", 1, 32),
+       FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 6),
+       FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 12),
+       FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 24),
+       FACTOR(CLK_TOP_UNIVPLL2_D16, "univpll2_d16", "univpll", 1, 48),
+       FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+       FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 10),
+       FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 20),
+       FACTOR(CLK_TOP_UNIVPLL3_D16, "univpll3_d16", "univpll", 1, 80),
+       FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+       FACTOR(CLK_TOP_UNIVPLL_D80_D4, "univpll_d80_d4", "univpll", 1, 320),
+       FACTOR(CLK_TOP_UNIV48M, "univ48m", "univpll", 1, 25),
+       FACTOR(CLK_TOP_SGMIIPLL, "sgmiipll_ck", "sgmipll", 1, 1),
+       FACTOR(CLK_TOP_SGMIIPLL_D2, "sgmiipll_d2", "sgmipll", 1, 2),
+       FACTOR(CLK_TOP_AUD1PLL, "aud1pll_ck", "aud1pll", 1, 1),
+       FACTOR(CLK_TOP_AUD2PLL, "aud2pll_ck", "aud2pll", 1, 1),
+       FACTOR(CLK_TOP_AUD_I2S2_MCK, "aud_i2s2_mck", "i2s2_mck_sel", 1, 2),
+       FACTOR(CLK_TOP_TO_USB3_REF, "to_usb3_ref", "univpll2_d4", 1, 4),
+       FACTOR(CLK_TOP_PCIE1_MAC_EN, "pcie1_mac_en", "univpll1_d4", 1, 1),
+       FACTOR(CLK_TOP_PCIE0_MAC_EN, "pcie0_mac_en", "univpll1_d4", 1, 1),
+       FACTOR(CLK_TOP_ETH_500M, "eth_500m", "eth1pll", 1, 1),
+};
+
+static const struct mtk_gate top_clks[] = {
+       /* TOP0 */
+       GATE_TOP0(CLK_TOP_APLL1_DIV_PD, "apll1_ck_div_pd", "apll1_ck_div", 0),
+       GATE_TOP0(CLK_TOP_APLL2_DIV_PD, "apll2_ck_div_pd", "apll2_ck_div", 1),
+       GATE_TOP0(CLK_TOP_I2S0_MCK_DIV_PD, "i2s0_mck_div_pd", "i2s0_mck_div",
+                 2),
+       GATE_TOP0(CLK_TOP_I2S1_MCK_DIV_PD, "i2s1_mck_div_pd", "i2s1_mck_div",
+                 3),
+       GATE_TOP0(CLK_TOP_I2S2_MCK_DIV_PD, "i2s2_mck_div_pd", "i2s2_mck_div",
+                 4),
+       GATE_TOP0(CLK_TOP_I2S3_MCK_DIV_PD, "i2s3_mck_div_pd", "i2s3_mck_div",
+                 5),
+
+       /* TOP1 */
+       GATE_TOP1(CLK_TOP_A1SYS_HP_DIV_PD, "a1sys_div_pd", "a1sys_div", 0),
+       GATE_TOP1(CLK_TOP_A2SYS_HP_DIV_PD, "a2sys_div_pd", "a2sys_div", 16),
+};
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+       DIV_ADJ(CLK_TOP_APLL1_DIV, "apll1_ck_div", "apll1_ck_sel",
+               0x120, 24, 3),
+       DIV_ADJ(CLK_TOP_APLL2_DIV, "apll2_ck_div", "apll2_ck_sel",
+               0x120, 28, 3),
+       DIV_ADJ(CLK_TOP_I2S0_MCK_DIV, "i2s0_mck_div", "i2s0_mck_sel",
+               0x124, 0, 7),
+       DIV_ADJ(CLK_TOP_I2S1_MCK_DIV, "i2s1_mck_div", "i2s1_mck_sel",
+               0x124, 8, 7),
+       DIV_ADJ(CLK_TOP_I2S2_MCK_DIV, "i2s2_mck_div", "aud_i2s2_mck",
+               0x124, 16, 7),
+       DIV_ADJ(CLK_TOP_I2S3_MCK_DIV, "i2s3_mck_div", "i2s3_mck_sel",
+               0x124, 24, 7),
+       DIV_ADJ(CLK_TOP_A1SYS_HP_DIV, "a1sys_div", "a1sys_hp_sel",
+               0x128, 8, 7),
+       DIV_ADJ(CLK_TOP_A2SYS_HP_DIV, "a2sys_div", "a2sys_hp_sel",
+               0x128, 24, 7),
+};
+
+static const struct mtk_gate peri_clks[] = {
+       /* PERI0 */
+       GATE_PERI0(CLK_PERI_THERM_PD, "peri_therm_pd", "axi_sel", 1),
+       GATE_PERI0(CLK_PERI_PWM1_PD, "peri_pwm1_pd", "clkxtal", 2),
+       GATE_PERI0(CLK_PERI_PWM2_PD, "peri_pwm2_pd", "clkxtal", 3),
+       GATE_PERI0(CLK_PERI_PWM3_PD, "peri_pwm3_pd", "clkxtal", 4),
+       GATE_PERI0(CLK_PERI_PWM4_PD, "peri_pwm4_pd", "clkxtal", 5),
+       GATE_PERI0(CLK_PERI_PWM5_PD, "peri_pwm5_pd", "clkxtal", 6),
+       GATE_PERI0(CLK_PERI_PWM6_PD, "peri_pwm6_pd", "clkxtal", 7),
+       GATE_PERI0(CLK_PERI_PWM7_PD, "peri_pwm7_pd", "clkxtal", 8),
+       GATE_PERI0(CLK_PERI_PWM_PD, "peri_pwm_pd", "clkxtal", 9),
+       GATE_PERI0(CLK_PERI_AP_DMA_PD, "peri_ap_dma_pd", "axi_sel", 12),
+       GATE_PERI0(CLK_PERI_MSDC30_0_PD, "peri_msdc30_0", "msdc30_0_sel", 13),
+       GATE_PERI0(CLK_PERI_MSDC30_1_PD, "peri_msdc30_1", "msdc30_1_sel", 14),
+       GATE_PERI0(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
+       GATE_PERI0(CLK_PERI_UART1_PD, "peri_uart1_pd", "axi_sel", 18),
+       GATE_PERI0(CLK_PERI_UART2_PD, "peri_uart2_pd", "axi_sel", 19),
+       GATE_PERI0(CLK_PERI_UART3_PD, "peri_uart3_pd", "axi_sel", 20),
+       GATE_PERI0(CLK_PERI_UART4_PD, "peri_uart4_pd", "axi_sel", 21),
+       GATE_PERI0(CLK_PERI_BTIF_PD, "peri_btif_pd", "axi_sel", 22),
+       GATE_PERI0(CLK_PERI_I2C0_PD, "peri_i2c0_pd", "axi_sel", 23),
+       GATE_PERI0(CLK_PERI_I2C1_PD, "peri_i2c1_pd", "axi_sel", 24),
+       GATE_PERI0(CLK_PERI_I2C2_PD, "peri_i2c2_pd", "axi_sel", 25),
+       GATE_PERI0(CLK_PERI_SPI1_PD, "peri_spi1_pd", "spi1_sel", 26),
+       GATE_PERI0(CLK_PERI_AUXADC_PD, "peri_auxadc_pd", "clkxtal", 27),
+       GATE_PERI0(CLK_PERI_SPI0_PD, "peri_spi0_pd", "spi0_sel", 28),
+       GATE_PERI0(CLK_PERI_SNFI_PD, "peri_snfi_pd", "nfi_infra_sel", 29),
+       GATE_PERI0(CLK_PERI_NFI_PD, "peri_nfi_pd", "axi_sel", 30),
+       GATE_PERI0(CLK_PERI_NFIECC_PD, "peri_nfiecc_pd", "axi_sel", 31),
+
+       /* PERI1 */
+       GATE_PERI1(CLK_PERI_FLASH_PD, "peri_flash_pd", "flash_sel", 1),
+       GATE_PERI1(CLK_PERI_IRTX_PD, "peri_irtx_pd", "irtx_sel", 2),
+};
+
+static struct mtk_composite infra_muxes[] __initdata = {
+       MUX(CLK_INFRA_MUX1_SEL, "infra_mux1_sel", infra_mux1_parents,
+           0x000, 2, 2),
+};
+
+static struct mtk_composite top_muxes[] = {
+       /* CLK_CFG_0 */
+       MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+                0x040, 0, 3, 7),
+       MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+                0x040, 8, 1, 15),
+       MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+                0x040, 16, 1, 23),
+       MUX_GATE(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+                0x040, 24, 3, 31),
+
+       /* CLK_CFG_1 */
+       MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+                0x050, 0, 2, 7),
+       MUX_GATE(CLK_TOP_F10M_REF_SEL, "f10m_ref_sel", f10m_ref_parents,
+                0x050, 8, 1, 15),
+       MUX_GATE(CLK_TOP_NFI_INFRA_SEL, "nfi_infra_sel", nfi_infra_parents,
+                0x050, 16, 4, 23),
+       MUX_GATE(CLK_TOP_FLASH_SEL, "flash_sel", flash_parents,
+                0x050, 24, 3, 31),
+
+       /* CLK_CFG_2 */
+       MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+                0x060, 0, 1, 7),
+       MUX_GATE(CLK_TOP_SPI0_SEL, "spi0_sel", spi0_parents,
+                0x060, 8, 3, 15),
+       MUX_GATE(CLK_TOP_SPI1_SEL, "spi1_sel", spi1_parents,
+                0x060, 16, 3, 23),
+       MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", uart_parents,
+                0x060, 24, 3, 31),
+
+       /* CLK_CFG_3 */
+       MUX_GATE(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_0_parents,
+                0x070, 0, 3, 7),
+       MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_0_parents,
+                0x070, 8, 3, 15),
+       MUX_GATE(CLK_TOP_A1SYS_HP_SEL, "a1sys_hp_sel", a1sys_hp_parents,
+                0x070, 16, 2, 23),
+       MUX_GATE(CLK_TOP_A2SYS_HP_SEL, "a2sys_hp_sel", a1sys_hp_parents,
+                0x070, 24, 2, 31),
+
+       /* CLK_CFG_4 */
+       MUX_GATE(CLK_TOP_INTDIR_SEL, "intdir_sel", intdir_parents,
+                0x080, 0, 2, 7),
+       MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+                0x080, 8, 2, 15),
+       MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+                0x080, 16, 3, 23),
+       MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", ddrphycfg_parents,
+                0x080, 24, 2, 31),
+
+       /* CLK_CFG_5 */
+       MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents,
+                0x090, 0, 2, 7),
+       MUX_GATE(CLK_TOP_HIF_SEL, "hif_sel", eth_parents,
+                0x090, 8, 3, 15),
+       MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+                0x090, 16, 2, 23),
+       MUX_GATE(CLK_TOP_U2_SEL, "usb20_sel", usb20_parents,
+                0x090, 24, 2, 31),
+
+       /* CLK_CFG_6 */
+       MUX_GATE(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+                0x0A0, 0, 1, 7),
+       MUX_GATE(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents,
+                0x0A0, 8, 1, 15),
+       MUX_GATE(CLK_TOP_IRRX_SEL, "irrx_sel", f10m_ref_parents,
+                0x0A0, 16, 1, 23),
+       MUX_GATE(CLK_TOP_IRTX_SEL, "irtx_sel", f10m_ref_parents,
+                0x0A0, 24, 1, 31),
+
+       /* CLK_CFG_7 */
+       MUX_GATE(CLK_TOP_ASM_L_SEL, "asm_l_sel", asm_l_parents,
+                0x0B0, 0, 2, 7),
+       MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel", asm_l_parents,
+                0x0B0, 8, 2, 15),
+       MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel", asm_l_parents,
+                0x0B0, 16, 2, 23),
+
+       /* CLK_AUDDIV_0 */
+       MUX(CLK_TOP_APLL1_SEL, "apll1_ck_sel", apll1_ck_parents,
+           0x120, 6, 1),
+       MUX(CLK_TOP_APLL2_SEL, "apll2_ck_sel", apll1_ck_parents,
+           0x120, 7, 1),
+       MUX(CLK_TOP_I2S0_MCK_SEL, "i2s0_mck_sel", apll1_ck_parents,
+           0x120, 8, 1),
+       MUX(CLK_TOP_I2S1_MCK_SEL, "i2s1_mck_sel", apll1_ck_parents,
+           0x120, 9, 1),
+       MUX(CLK_TOP_I2S2_MCK_SEL, "i2s2_mck_sel", apll1_ck_parents,
+           0x120, 10, 1),
+       MUX(CLK_TOP_I2S3_MCK_SEL, "i2s3_mck_sel", apll1_ck_parents,
+           0x120, 11, 1),
+};
+
+static struct mtk_composite peri_muxes[] = {
+       /* PERI_GLOBALCON_CKSEL */
+       MUX(CLK_PERIBUS_SEL, "peribus_ck_sel", peribus_ck_parents, 0x05C, 0, 1),
+};
+
+static int mtk_topckgen_init(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       void __iomem *base;
+       struct device_node *node = pdev->dev.of_node;
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+       mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+                                   clk_data);
+
+       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+                                clk_data);
+
+       mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+                                   base, &mt7622_clk_lock, clk_data);
+
+       mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+                                 base, &mt7622_clk_lock, clk_data);
+
+       mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+                              clk_data);
+
+       clk_prepare_enable(clk_data->clks[CLK_TOP_AXI_SEL]);
+       clk_prepare_enable(clk_data->clks[CLK_TOP_MEM_SEL]);
+       clk_prepare_enable(clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+
+       return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int __init mtk_infrasys_init(struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       struct clk_onecell_data *clk_data;
+       int r;
+
+       clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+       mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+                              clk_data);
+
+       mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
+                                 clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get,
+                               clk_data);
+       if (r)
+               return r;
+
+       mtk_register_reset_controller(node, 1, 0x30);
+
+       return 0;
+}
+
+static int mtk_apmixedsys_init(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       struct device_node *node = pdev->dev.of_node;
+
+       clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+       if (!clk_data)
+               return -ENOMEM;
+
+       mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
+                             clk_data);
+
+       mtk_clk_register_gates(node, apmixed_clks,
+                              ARRAY_SIZE(apmixed_clks), clk_data);
+
+       clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
+       clk_prepare_enable(clk_data->clks[CLK_APMIXED_MAIN_CORE_EN]);
+
+       return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int mtk_pericfg_init(struct platform_device *pdev)
+{
+       struct clk_onecell_data *clk_data;
+       void __iomem *base;
+       int r;
+       struct device_node *node = pdev->dev.of_node;
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+       mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+                              clk_data);
+
+       mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
+                                   &mt7622_clk_lock, clk_data);
+
+       r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       if (r)
+               return r;
+
+       clk_prepare_enable(clk_data->clks[CLK_PERI_UART0_PD]);
+
+       mtk_register_reset_controller(node, 2, 0x0);
+
+       return 0;
+}
+
+static const struct of_device_id of_match_clk_mt7622[] = {
+       {
+               .compatible = "mediatek,mt7622-apmixedsys",
+               .data = mtk_apmixedsys_init,
+       }, {
+               .compatible = "mediatek,mt7622-infracfg",
+               .data = mtk_infrasys_init,
+       }, {
+               .compatible = "mediatek,mt7622-topckgen",
+               .data = mtk_topckgen_init,
+       }, {
+               .compatible = "mediatek,mt7622-pericfg",
+               .data = mtk_pericfg_init,
+       }, {
+               /* sentinel */
+       }
+};
+
+static int clk_mt7622_probe(struct platform_device *pdev)
+{
+       int (*clk_init)(struct platform_device *);
+       int r;
+
+       clk_init = of_device_get_match_data(&pdev->dev);
+       if (!clk_init)
+               return -EINVAL;
+
+       r = clk_init(pdev);
+       if (r)
+               dev_err(&pdev->dev,
+                       "could not register clock provider: %s: %d\n",
+                       pdev->name, r);
+
+       return r;
+}
+
+static struct platform_driver clk_mt7622_drv = {
+       .probe = clk_mt7622_probe,
+       .driver = {
+               .name = "clk-mt7622",
+               .of_match_table = of_match_clk_mt7622,
+       },
+};
+
+static int clk_mt7622_init(void)
+{
+       return platform_driver_register(&clk_mt7622_drv);
+}
+
+arch_initcall(clk_mt7622_init);
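
Note on the FACTOR() table above: each entry registers a fixed-factor clock whose rate is simply parent_rate * mult / div, with the multiplier and divider taken from the last two macro arguments. Read that way, "syspll1_d2" is mainpll / 4, "univpll" is univ2pll / 2 and "mempll" is clkxtal * 32; the "_dN" suffixes describe the intended ratio relative to an intermediate tap, while the actual arithmetic always refers back to the named parent.
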
index f5d6b70ce189372602f2c6f8aaa1d2694ba8e65e..f10250dcece4ebd44e4216864da1a90f0a802dbd 100644 (file)
@@ -207,6 +207,8 @@ struct mtk_pll_data {
        uint32_t en_mask;
        uint32_t pd_reg;
        uint32_t tuner_reg;
+       uint32_t tuner_en_reg;
+       uint8_t tuner_en_bit;
        int pd_shift;
        unsigned int flags;
        const struct clk_ops *ops;
@@ -216,6 +218,7 @@ struct mtk_pll_data {
        uint32_t pcw_reg;
        int pcw_shift;
        const struct mtk_pll_div_table *div_table;
+       const char *parent_name;
 };
 
 void mtk_clk_register_plls(struct device_node *node,
index a409142e93462dd0eb7fc599f43479a26b2c695c..f54e4015b0b1f3c005e7d82b5fdffa6dc826a8ad 100644 (file)
@@ -47,6 +47,7 @@ struct mtk_clk_pll {
        void __iomem    *pd_addr;
        void __iomem    *pwr_addr;
        void __iomem    *tuner_addr;
+       void __iomem    *tuner_en_addr;
        void __iomem    *pcw_addr;
        const struct mtk_pll_data *data;
 };
@@ -227,7 +228,10 @@ static int mtk_pll_prepare(struct clk_hw *hw)
        r |= pll->data->en_mask;
        writel(r, pll->base_addr + REG_CON0);
 
-       if (pll->tuner_addr) {
+       if (pll->tuner_en_addr) {
+               r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+               writel(r, pll->tuner_en_addr);
+       } else if (pll->tuner_addr) {
                r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
                writel(r, pll->tuner_addr);
        }
@@ -254,7 +258,10 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
                writel(r, pll->base_addr + REG_CON0);
        }
 
-       if (pll->tuner_addr) {
+       if (pll->tuner_en_addr) {
+               r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+               writel(r, pll->tuner_en_addr);
+       } else if (pll->tuner_addr) {
                r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
                writel(r, pll->tuner_addr);
        }
@@ -297,13 +304,18 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
        pll->pcw_addr = base + data->pcw_reg;
        if (data->tuner_reg)
                pll->tuner_addr = base + data->tuner_reg;
+       if (data->tuner_en_reg)
+               pll->tuner_en_addr = base + data->tuner_en_reg;
        pll->hw.init = &init;
        pll->data = data;
 
        init.name = data->name;
        init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
        init.ops = &mtk_pll_ops;
-       init.parent_names = &parent_name;
+       if (data->parent_name)
+               init.parent_names = &data->parent_name;
+       else
+               init.parent_names = &parent_name;
        init.num_parents = 1;
 
        clk = clk_register(NULL, &pll->hw);
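
Note: the two clk-pll.c hunks above pair with the new mtk_pll_data fields. When .tuner_en_reg is set, prepare/unprepare toggles the single bit .tuner_en_bit in that dedicated register instead of AUDPLL_TUNER_EN in the tuner register itself, and an optional per-PLL .parent_name overrides the parent name the caller passes to mtk_clk_register_pll() (MT7622 uses this to parent its PLLs to "clkxtal").
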
index b2d1e8ed7152b75f11352f58ff40cad71a1dab57..ae385310e98090de90002b16d1829ecaad6819b3 100644 (file)
@@ -1131,6 +1131,253 @@ static struct clk_gate gxbb_sd_emmc_c_clk0 = {
        },
 };
 
+/* VPU Clock */
+
+static u32 mux_table_vpu[] = {0, 1, 2, 3};
+static const char * const gxbb_vpu_parent_names[] = {
+       "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_mux gxbb_vpu_0_sel = {
+       .reg = (void *)HHI_VPU_CLK_CNTL,
+       .mask = 0x3,
+       .shift = 9,
+       .lock = &clk_lock,
+       .table = mux_table_vpu,
+       .hw.init = &(struct clk_init_data){
+               .name = "vpu_0_sel",
+               .ops = &clk_mux_ops,
+               /*
+                * bits 9:10 select from 4 possible parents:
+                * fclk_div4, fclk_div3, fclk_div5, fclk_div7
+                */
+               .parent_names = gxbb_vpu_parent_names,
+               .num_parents = ARRAY_SIZE(gxbb_vpu_parent_names),
+               .flags = CLK_SET_RATE_NO_REPARENT,
+       },
+};
+
+static struct clk_divider gxbb_vpu_0_div = {
+       .reg = (void *)HHI_VPU_CLK_CNTL,
+       .shift = 0,
+       .width = 7,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data){
+               .name = "vpu_0_div",
+               .ops = &clk_divider_ops,
+               .parent_names = (const char *[]){ "vpu_0_sel" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_gate gxbb_vpu_0 = {
+       .reg = (void *)HHI_VPU_CLK_CNTL,
+       .bit_idx = 8,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data) {
+               .name = "vpu_0",
+               .ops = &clk_gate_ops,
+               .parent_names = (const char *[]){ "vpu_0_div" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+       },
+};
+
+static struct clk_mux gxbb_vpu_1_sel = {
+       .reg = (void *)HHI_VPU_CLK_CNTL,
+       .mask = 0x3,
+       .shift = 25,
+       .lock = &clk_lock,
+       .table = mux_table_vpu,
+       .hw.init = &(struct clk_init_data){
+               .name = "vpu_1_sel",
+               .ops = &clk_mux_ops,
+               /*
+                * bits 25:26 select from 4 possible parents:
+                * fclk_div4, fclk_div3, fclk_div5, fclk_div7
+                */
+               .parent_names = gxbb_vpu_parent_names,
+               .num_parents = ARRAY_SIZE(gxbb_vpu_parent_names),
+               .flags = CLK_SET_RATE_NO_REPARENT,
+       },
+};
+
+static struct clk_divider gxbb_vpu_1_div = {
+       .reg = (void *)HHI_VPU_CLK_CNTL,
+       .shift = 16,
+       .width = 7,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data){
+               .name = "vpu_1_div",
+               .ops = &clk_divider_ops,
+               .parent_names = (const char *[]){ "vpu_1_sel" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_gate gxbb_vpu_1 = {
+       .reg = (void *)HHI_VPU_CLK_CNTL,
+       .bit_idx = 24,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data) {
+               .name = "vpu_1",
+               .ops = &clk_gate_ops,
+               .parent_names = (const char *[]){ "vpu_1_div" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+       },
+};
+
+static struct clk_mux gxbb_vpu = {
+       .reg = (void *)HHI_VPU_CLK_CNTL,
+       .mask = 1,
+       .shift = 31,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data){
+               .name = "vpu",
+               .ops = &clk_mux_ops,
+               /*
+                * bit 31 selects from 2 possible parents:
+                * vpu_0 or vpu_1
+                */
+               .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+               .num_parents = 2,
+               .flags = CLK_SET_RATE_NO_REPARENT,
+       },
+};
+
+/* VAPB Clock */
+
+static u32 mux_table_vapb[] = {0, 1, 2, 3};
+static const char * const gxbb_vapb_parent_names[] = {
+       "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_mux gxbb_vapb_0_sel = {
+       .reg = (void *)HHI_VAPBCLK_CNTL,
+       .mask = 0x3,
+       .shift = 9,
+       .lock = &clk_lock,
+       .table = mux_table_vapb,
+       .hw.init = &(struct clk_init_data){
+               .name = "vapb_0_sel",
+               .ops = &clk_mux_ops,
+               /*
+                * bits 9:10 select from 4 possible parents:
+                * fclk_div4, fclk_div3, fclk_div5, fclk_div7
+                */
+               .parent_names = gxbb_vapb_parent_names,
+               .num_parents = ARRAY_SIZE(gxbb_vapb_parent_names),
+               .flags = CLK_SET_RATE_NO_REPARENT,
+       },
+};
+
+static struct clk_divider gxbb_vapb_0_div = {
+       .reg = (void *)HHI_VAPBCLK_CNTL,
+       .shift = 0,
+       .width = 7,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data){
+               .name = "vapb_0_div",
+               .ops = &clk_divider_ops,
+               .parent_names = (const char *[]){ "vapb_0_sel" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_gate gxbb_vapb_0 = {
+       .reg = (void *)HHI_VAPBCLK_CNTL,
+       .bit_idx = 8,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data) {
+               .name = "vapb_0",
+               .ops = &clk_gate_ops,
+               .parent_names = (const char *[]){ "vapb_0_div" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+       },
+};
+
+static struct clk_mux gxbb_vapb_1_sel = {
+       .reg = (void *)HHI_VAPBCLK_CNTL,
+       .mask = 0x3,
+       .shift = 25,
+       .lock = &clk_lock,
+       .table = mux_table_vapb,
+       .hw.init = &(struct clk_init_data){
+               .name = "vapb_1_sel",
+               .ops = &clk_mux_ops,
+               /*
+                * bits 25:26 select from 4 possible parents:
+                * fclk_div4, fclk_div3, fclk_div5, fclk_div7
+                */
+               .parent_names = gxbb_vapb_parent_names,
+               .num_parents = ARRAY_SIZE(gxbb_vapb_parent_names),
+               .flags = CLK_SET_RATE_NO_REPARENT,
+       },
+};
+
+static struct clk_divider gxbb_vapb_1_div = {
+       .reg = (void *)HHI_VAPBCLK_CNTL,
+       .shift = 16,
+       .width = 7,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data){
+               .name = "vapb_1_div",
+               .ops = &clk_divider_ops,
+               .parent_names = (const char *[]){ "vapb_1_sel" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_gate gxbb_vapb_1 = {
+       .reg = (void *)HHI_VAPBCLK_CNTL,
+       .bit_idx = 24,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data) {
+               .name = "vapb_1",
+               .ops = &clk_gate_ops,
+               .parent_names = (const char *[]){ "vapb_1_div" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+       },
+};
+
+static struct clk_mux gxbb_vapb_sel = {
+       .reg = (void *)HHI_VAPBCLK_CNTL,
+       .mask = 1,
+       .shift = 31,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data){
+               .name = "vapb_sel",
+               .ops = &clk_mux_ops,
+               /*
+                * bit 31 selects from 2 possible parents:
+                * vapb_0 or vapb_1
+                */
+               .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
+               .num_parents = 2,
+               .flags = CLK_SET_RATE_NO_REPARENT,
+       },
+};
+
+static struct clk_gate gxbb_vapb = {
+       .reg = (void *)HHI_VAPBCLK_CNTL,
+       .bit_idx = 30,
+       .lock = &clk_lock,
+       .hw.init = &(struct clk_init_data) {
+               .name = "vapb",
+               .ops = &clk_gate_ops,
+               .parent_names = (const char *[]){ "vapb_sel" },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+       },
+};
+
 /* Everything Else (EE) domain gates */
 static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
 static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1349,6 +1596,21 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
                [CLKID_SD_EMMC_C_CLK0_SEL]  = &gxbb_sd_emmc_c_clk0_sel.hw,
                [CLKID_SD_EMMC_C_CLK0_DIV]  = &gxbb_sd_emmc_c_clk0_div.hw,
                [CLKID_SD_EMMC_C_CLK0]      = &gxbb_sd_emmc_c_clk0.hw,
+               [CLKID_VPU_0_SEL]           = &gxbb_vpu_0_sel.hw,
+               [CLKID_VPU_0_DIV]           = &gxbb_vpu_0_div.hw,
+               [CLKID_VPU_0]               = &gxbb_vpu_0.hw,
+               [CLKID_VPU_1_SEL]           = &gxbb_vpu_1_sel.hw,
+               [CLKID_VPU_1_DIV]           = &gxbb_vpu_1_div.hw,
+               [CLKID_VPU_1]               = &gxbb_vpu_1.hw,
+               [CLKID_VPU]                 = &gxbb_vpu.hw,
+               [CLKID_VAPB_0_SEL]          = &gxbb_vapb_0_sel.hw,
+               [CLKID_VAPB_0_DIV]          = &gxbb_vapb_0_div.hw,
+               [CLKID_VAPB_0]              = &gxbb_vapb_0.hw,
+               [CLKID_VAPB_1_SEL]          = &gxbb_vapb_1_sel.hw,
+               [CLKID_VAPB_1_DIV]          = &gxbb_vapb_1_div.hw,
+               [CLKID_VAPB_1]              = &gxbb_vapb_1.hw,
+               [CLKID_VAPB_SEL]            = &gxbb_vapb_sel.hw,
+               [CLKID_VAPB]                = &gxbb_vapb.hw,
                [NR_CLKS]                   = NULL,
        },
        .num = NR_CLKS,
@@ -1481,6 +1743,21 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
                [CLKID_SD_EMMC_C_CLK0_SEL]  = &gxbb_sd_emmc_c_clk0_sel.hw,
                [CLKID_SD_EMMC_C_CLK0_DIV]  = &gxbb_sd_emmc_c_clk0_div.hw,
                [CLKID_SD_EMMC_C_CLK0]      = &gxbb_sd_emmc_c_clk0.hw,
+               [CLKID_VPU_0_SEL]           = &gxbb_vpu_0_sel.hw,
+               [CLKID_VPU_0_DIV]           = &gxbb_vpu_0_div.hw,
+               [CLKID_VPU_0]               = &gxbb_vpu_0.hw,
+               [CLKID_VPU_1_SEL]           = &gxbb_vpu_1_sel.hw,
+               [CLKID_VPU_1_DIV]           = &gxbb_vpu_1_div.hw,
+               [CLKID_VPU_1]               = &gxbb_vpu_1.hw,
+               [CLKID_VPU]                 = &gxbb_vpu.hw,
+               [CLKID_VAPB_0_SEL]          = &gxbb_vapb_0_sel.hw,
+               [CLKID_VAPB_0_DIV]          = &gxbb_vapb_0_div.hw,
+               [CLKID_VAPB_0]              = &gxbb_vapb_0.hw,
+               [CLKID_VAPB_1_SEL]          = &gxbb_vapb_1_sel.hw,
+               [CLKID_VAPB_1_DIV]          = &gxbb_vapb_1_div.hw,
+               [CLKID_VAPB_1]              = &gxbb_vapb_1.hw,
+               [CLKID_VAPB_SEL]            = &gxbb_vapb_sel.hw,
+               [CLKID_VAPB]                = &gxbb_vapb.hw,
                [NR_CLKS]                   = NULL,
        },
        .num = NR_CLKS,
@@ -1600,6 +1877,11 @@ static struct clk_gate *const gxbb_clk_gates[] = {
        &gxbb_sd_emmc_a_clk0,
        &gxbb_sd_emmc_b_clk0,
        &gxbb_sd_emmc_c_clk0,
+       &gxbb_vpu_0,
+       &gxbb_vpu_1,
+       &gxbb_vapb_0,
+       &gxbb_vapb_1,
+       &gxbb_vapb,
 };
 
 static struct clk_mux *const gxbb_clk_muxes[] = {
@@ -1615,6 +1897,12 @@ static struct clk_mux *const gxbb_clk_muxes[] = {
        &gxbb_sd_emmc_a_clk0_sel,
        &gxbb_sd_emmc_b_clk0_sel,
        &gxbb_sd_emmc_c_clk0_sel,
+       &gxbb_vpu_0_sel,
+       &gxbb_vpu_1_sel,
+       &gxbb_vpu,
+       &gxbb_vapb_0_sel,
+       &gxbb_vapb_1_sel,
+       &gxbb_vapb_sel,
 };
 
 static struct clk_divider *const gxbb_clk_dividers[] = {
@@ -1627,6 +1915,10 @@ static struct clk_divider *const gxbb_clk_dividers[] = {
        &gxbb_sd_emmc_a_clk0_div,
        &gxbb_sd_emmc_b_clk0_div,
        &gxbb_sd_emmc_c_clk0_div,
+       &gxbb_vpu_0_div,
+       &gxbb_vpu_1_div,
+       &gxbb_vapb_0_div,
+       &gxbb_vapb_1_div,
 };
 
 static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = {
index 5b1d4b374d1c21dfa9ded8167b5e9d32bad95b77..aee6fbba20043275cd25adce6b2548febcbea2b9 100644 (file)
 #define CLKID_SD_EMMC_B_CLK0_DIV  121
 #define CLKID_SD_EMMC_C_CLK0_SEL  123
 #define CLKID_SD_EMMC_C_CLK0_DIV  124
+#define CLKID_VPU_0_DIV                  127
+#define CLKID_VPU_1_DIV                  130
+#define CLKID_VAPB_0_DIV         134
+#define CLKID_VAPB_1_DIV         137
 
-#define NR_CLKS                          126
+#define NR_CLKS                          141
 
 /* include the CLKIDs that have been made part of the DT binding */
 #include <dt-bindings/clock/gxbb-clkc.h>
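
Note: the fifteen new VPU/VAPB entries added to the GXBB and GXL onecell tables above account exactly for the NR_CLKS bump from 126 to 141 (126 + 15 = 141). Only the internal divider IDs (127, 130, 134 and 137) live in this private header; the remaining selector and gate IDs come from the dt-bindings header included just below, which per the file's own comment carries the CLKIDs exposed to device trees.
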
index 4c717db05f2deb61a4519c7b112d3c75199371fe..fb294ada0b03d770de27a02b151c9e311b2462cf 100644 (file)
@@ -114,7 +114,7 @@ static void clk_apbc_unprepare(struct clk_hw *hw)
                spin_unlock_irqrestore(apbc->lock, flags);
 }
 
-static struct clk_ops clk_apbc_ops = {
+static const struct clk_ops clk_apbc_ops = {
        .prepare = clk_apbc_prepare,
        .unprepare = clk_apbc_unprepare,
 };
index 47b5542ce50f3ec8c356eeb4bdbf9c580f207bfd..b7ce8f52026e40be24e5683d458dbb3597bb860e 100644 (file)
@@ -60,7 +60,7 @@ static void clk_apmu_disable(struct clk_hw *hw)
                spin_unlock_irqrestore(apmu->lock, flags);
 }
 
-static struct clk_ops clk_apmu_ops = {
+static const struct clk_ops clk_apmu_ops = {
        .enable = clk_apmu_enable,
        .disable = clk_apmu_disable,
 };
index 584a9927993b41f73df73f40ec598f4e7d45ca1d..cb43d54735b054cf971875e02d1ee30caae12652 100644 (file)
@@ -149,7 +149,7 @@ static void clk_factor_init(struct clk_hw *hw)
                spin_unlock_irqrestore(factor->lock, flags);
 }
 
-static struct clk_ops clk_factor_ops = {
+static const struct clk_ops clk_factor_ops = {
        .recalc_rate = clk_factor_recalc_rate,
        .round_rate = clk_factor_round_rate,
        .set_rate = clk_factor_set_rate,
@@ -172,10 +172,8 @@ struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
        }
 
        factor = kzalloc(sizeof(*factor), GFP_KERNEL);
-       if (!factor) {
-               pr_err("%s: could not allocate factor  clk\n", __func__);
+       if (!factor)
                return ERR_PTR(-ENOMEM);
-       }
 
        /* struct clk_aux assignments */
        factor->base = base;
index d20cd3431ac27547121d8bc6412c460a45b1ff10..7355595c42e21e4249590b40292cc2a814e7002b 100644 (file)
@@ -103,10 +103,8 @@ struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
 
        /* allocate the gate */
        gate = kzalloc(sizeof(*gate), GFP_KERNEL);
-       if (!gate) {
-               pr_err("%s:%s could not allocate gate clk\n", __func__, name);
+       if (!gate)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &mmp_clk_gate_ops;
index c554833cffc509a8d2e120c4af5873b8100e3018..90814b2613c0c2c4e01a1b39b186ee06d1efbcc1 100644 (file)
@@ -229,7 +229,7 @@ static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
                        parent_rate = clk_hw_get_rate(parent);
                        mix_rate = parent_rate / item->divisor;
                        gap = abs(mix_rate - req->rate);
-                       if (parent_best == NULL || gap < gap_best) {
+                       if (!parent_best || gap < gap_best) {
                                parent_best = parent;
                                parent_rate_best = parent_rate;
                                mix_rate_best = mix_rate;
@@ -247,7 +247,7 @@ static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
                                div = _get_div(mix, j);
                                mix_rate = parent_rate / div;
                                gap = abs(mix_rate - req->rate);
-                               if (parent_best == NULL || gap < gap_best) {
+                               if (!parent_best || gap < gap_best) {
                                        parent_best = parent;
                                        parent_rate_best = parent_rate;
                                        mix_rate_best = mix_rate;
@@ -451,11 +451,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
        size_t table_bytes;
 
        mix = kzalloc(sizeof(*mix), GFP_KERNEL);
-       if (!mix) {
-               pr_err("%s:%s: could not allocate mmp mix clk\n",
-                       __func__, name);
+       if (!mix)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.flags = flags | CLK_GET_RATE_NOCACHE;
@@ -467,12 +464,9 @@ struct clk *mmp_clk_register_mix(struct device *dev,
        if (config->table) {
                table_bytes = sizeof(*config->table) * config->table_size;
                mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
-               if (!mix->table) {
-                       pr_err("%s:%s: could not allocate mmp mix table\n",
-                               __func__, name);
-                       kfree(mix);
-                       return ERR_PTR(-ENOMEM);
-               }
+               if (!mix->table)
+                       goto free_mix;
+
                mix->table_size = config->table_size;
        }
 
@@ -481,11 +475,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
                mix->mux_table = kmemdup(config->mux_table, table_bytes,
                                         GFP_KERNEL);
                if (!mix->mux_table) {
-                       pr_err("%s:%s: could not allocate mmp mix mux-table\n",
-                               __func__, name);
                        kfree(mix->table);
-                       kfree(mix);
-                       return ERR_PTR(-ENOMEM);
+                       goto free_mix;
                }
        }
 
@@ -509,4 +500,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
        }
 
        return clk;
+
+free_mix:
+       kfree(mix);
+       return ERR_PTR(-ENOMEM);
 }
index 038023483b98fe1f4d2cbed08f8a6525e7360d53..7460031714da970569aee9209279b62953cdfa5d 100644 (file)
@@ -83,19 +83,19 @@ void __init mmp2_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
        void __iomem *apbc_base;
 
        mpmu_base = ioremap(mpmu_phys, SZ_4K);
-       if (mpmu_base == NULL) {
+       if (!mpmu_base) {
                pr_err("error to ioremap MPMU base\n");
                return;
        }
 
        apmu_base = ioremap(apmu_phys, SZ_4K);
-       if (apmu_base == NULL) {
+       if (!apmu_base) {
                pr_err("error to ioremap APMU base\n");
                return;
        }
 
        apbc_base = ioremap(apbc_phys, SZ_4K);
-       if (apbc_base == NULL) {
+       if (!apbc_base) {
                pr_err("error to ioremap APBC base\n");
                return;
        }
index a9ef9209532aa66605c1eca9de7b1f74d52ae09c..8e2551ab846251ce4bdd0b4e13fc70e832db4782 100644 (file)
@@ -75,19 +75,19 @@ void __init pxa168_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
        void __iomem *apbc_base;
 
        mpmu_base = ioremap(mpmu_phys, SZ_4K);
-       if (mpmu_base == NULL) {
+       if (!mpmu_base) {
                pr_err("error to ioremap MPMU base\n");
                return;
        }
 
        apmu_base = ioremap(apmu_phys, SZ_4K);
-       if (apmu_base == NULL) {
+       if (!apmu_base) {
                pr_err("error to ioremap APMU base\n");
                return;
        }
 
        apbc_base = ioremap(apbc_phys, SZ_4K);
-       if (apbc_base == NULL) {
+       if (!apbc_base) {
                pr_err("error to ioremap APBC base\n");
                return;
        }
index a520cf7702a11649fe14581a59cdb063c859b461..7a79651419183dfa044bcd8ccfd69d87953ce810 100644 (file)
@@ -74,25 +74,25 @@ void __init pxa910_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
        void __iomem *apbc_base;
 
        mpmu_base = ioremap(mpmu_phys, SZ_4K);
-       if (mpmu_base == NULL) {
+       if (!mpmu_base) {
                pr_err("error to ioremap MPMU base\n");
                return;
        }
 
        apmu_base = ioremap(apmu_phys, SZ_4K);
-       if (apmu_base == NULL) {
+       if (!apmu_base) {
                pr_err("error to ioremap APMU base\n");
                return;
        }
 
        apbcp_base = ioremap(apbcp_phys, SZ_4K);
-       if (apbcp_base == NULL) {
+       if (!apbcp_base) {
                pr_err("error to ioremap APBC extension base\n");
                return;
        }
 
        apbc_base = ioremap(apbc_phys, SZ_4K);
-       if (apbc_base == NULL) {
+       if (!apbc_base) {
                pr_err("error to ioremap APBC base\n");
                return;
        }
index f75e989c578ffca5ce8474262a60b9809e13fd7f..ccebd014fc1ea70350ad1bc4a4a9594055cf9786 100644 (file)
@@ -67,7 +67,7 @@ static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
        return ret;
 }
 
-static struct clk_ops clk_div_ops = {
+static const struct clk_ops clk_div_ops = {
        .recalc_rate = clk_div_recalc_rate,
        .round_rate = clk_div_round_rate,
        .set_rate = clk_div_set_rate,
index f8dd10f6df3d47347ea245a5a2b8275ec738f9f4..27b3372adc37e3531aec3dac77f19a0a97ebe486 100644 (file)
@@ -107,7 +107,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
        return mxs_clk_wait(frac->reg, frac->busy);
 }
 
-static struct clk_ops clk_frac_ops = {
+static const struct clk_ops clk_frac_ops = {
        .recalc_rate = clk_frac_recalc_rate,
        .round_rate = clk_frac_round_rate,
        .set_rate = clk_frac_set_rate,
index 74f64c3c429098b832840b5082ab3eb15e5ef73f..b80dc9d5855c9a041957cb7f067d072d868466b8 100644 (file)
@@ -147,9 +147,7 @@ void pxa2xx_core_turbo_switch(bool on)
        "       b       3f\n"
        "2:     b       1b\n"
        "3:     nop\n"
-               : "=&r" (unused)
-               : "r" (clkcfg)
-               : );
+               : "=&r" (unused) : "r" (clkcfg));
 
        local_irq_restore(flags);
 }
index 1b3e8d265bdb02c08c96c7cfecfb3ce8e401de60..a2495457e5647f1ca0e11c23191a9486b8f59cd0 100644 (file)
@@ -156,7 +156,6 @@ extern const struct clk_ops clk_dyn_rcg_ops;
  * @hid_width: number of bits in half integer divider
  * @parent_map: map from software's parent index to hardware's src_sel field
  * @freq_tbl: frequency table
- * @current_freq: last cached frequency when using branches with shared RCGs
  * @clkr: regmap clock handle
  *
  */
@@ -166,7 +165,6 @@ struct clk_rcg2 {
        u8                      hid_width;
        const struct parent_map *parent_map;
        const struct freq_tbl   *freq_tbl;
-       unsigned long           current_freq;
        struct clk_regmap       clkr;
 };
 
@@ -174,7 +172,6 @@ struct clk_rcg2 {
 
 extern const struct clk_ops clk_rcg2_ops;
 extern const struct clk_ops clk_rcg2_floor_ops;
-extern const struct clk_ops clk_rcg2_shared_ops;
 extern const struct clk_ops clk_edp_pixel_ops;
 extern const struct clk_ops clk_byte_ops;
 extern const struct clk_ops clk_byte2_ops;
index 1a0985ae20d2e34a2f4588784bfd70ba2f66050f..bbeaf9c09dbb4750cd6479ae16ab416626a19731 100644 (file)
@@ -358,85 +358,6 @@ const struct clk_ops clk_rcg2_floor_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
 
-static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate)
-{
-       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-       const char *name = clk_hw_get_name(hw);
-       int ret, count;
-
-       /* force enable RCG */
-       ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
-                                CMD_ROOT_EN, CMD_ROOT_EN);
-       if (ret)
-               return ret;
-
-       /* wait for RCG to turn ON */
-       for (count = 500; count > 0; count--) {
-               ret = clk_rcg2_is_enabled(hw);
-               if (ret)
-                       break;
-               udelay(1);
-       }
-       if (!count)
-               pr_err("%s: RCG did not turn on\n", name);
-
-       /* set clock rate */
-       ret = __clk_rcg2_set_rate(hw, rate, CEIL);
-       if (ret)
-               return ret;
-
-       /* clear force enable RCG */
-       return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
-                                CMD_ROOT_EN, 0);
-}
-
-static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
-                                   unsigned long parent_rate)
-{
-       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
-       /* cache the rate */
-       rcg->current_freq = rate;
-
-       if (!__clk_is_enabled(hw->clk))
-               return 0;
-
-       return clk_rcg2_shared_force_enable(hw, rcg->current_freq);
-}
-
-static unsigned long
-clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
-       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
-       return rcg->current_freq = clk_rcg2_recalc_rate(hw, parent_rate);
-}
-
-static int clk_rcg2_shared_enable(struct clk_hw *hw)
-{
-       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
-       return clk_rcg2_shared_force_enable(hw, rcg->current_freq);
-}
-
-static void clk_rcg2_shared_disable(struct clk_hw *hw)
-{
-       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
-       /* switch to XO, which is the lowest entry in the freq table */
-       clk_rcg2_shared_set_rate(hw, rcg->freq_tbl[0].freq, 0);
-}
-
-const struct clk_ops clk_rcg2_shared_ops = {
-       .enable = clk_rcg2_shared_enable,
-       .disable = clk_rcg2_shared_disable,
-       .get_parent = clk_rcg2_get_parent,
-       .recalc_rate = clk_rcg2_shared_recalc_rate,
-       .determine_rate = clk_rcg2_determine_rate,
-       .set_rate = clk_rcg2_shared_set_rate,
-};
-EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
-
 struct frac_entry {
        int num;
        int den;
index df3e5fe8442a555a3f22380bebd6a7a41645013d..c60f61b10c7f9335bc269489b84b4d16d44863a7 100644 (file)
                },                                                            \
        }
 
+#define DEFINE_CLK_RPM_FIXED(_platform, _name, _active, r_id, r)             \
+       static struct clk_rpm _platform##_##_name = {                         \
+               .rpm_clk_id = (r_id),                                         \
+               .rate = (r),                                                  \
+               .hw.init = &(struct clk_init_data){                           \
+                       .ops = &clk_rpm_fixed_ops,                            \
+                       .name = #_name,                                       \
+                       .parent_names = (const char *[]){ "pxo" },            \
+                       .num_parents = 1,                                     \
+               },                                                            \
+       }
+
 #define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r)        \
        static struct clk_rpm _platform##_##_active;                          \
        static struct clk_rpm _platform##_##_name = {                         \
@@ -143,6 +155,13 @@ static int clk_rpm_handoff(struct clk_rpm *r)
        int ret;
        u32 value = INT_MAX;
 
+       /*
+        * The vendor tree simply reads the status for this
+        * RPM clock.
+        */
+       if (r->rpm_clk_id == QCOM_RPM_PLL_4)
+               return 0;
+
        ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
                             r->rpm_clk_id, &value, 1);
        if (ret)
@@ -269,6 +288,32 @@ out:
        mutex_unlock(&rpm_clk_lock);
 }
 
+static int clk_rpm_fixed_prepare(struct clk_hw *hw)
+{
+       struct clk_rpm *r = to_clk_rpm(hw);
+       u32 value = 1;
+       int ret;
+
+       ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+                            r->rpm_clk_id, &value, 1);
+       if (!ret)
+               r->enabled = true;
+
+       return ret;
+}
+
+static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
+{
+       struct clk_rpm *r = to_clk_rpm(hw);
+       u32 value = 0;
+       int ret;
+
+       ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+                            r->rpm_clk_id, &value, 1);
+       if (!ret)
+               r->enabled = false;
+}
+
 static int clk_rpm_set_rate(struct clk_hw *hw,
                            unsigned long rate, unsigned long parent_rate)
 {
@@ -333,6 +378,13 @@ static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
        return r->rate;
 }
 
+static const struct clk_ops clk_rpm_fixed_ops = {
+       .prepare        = clk_rpm_fixed_prepare,
+       .unprepare      = clk_rpm_fixed_unprepare,
+       .round_rate     = clk_rpm_round_rate,
+       .recalc_rate    = clk_rpm_recalc_rate,
+};
+
 static const struct clk_ops clk_rpm_ops = {
        .prepare        = clk_rpm_prepare,
        .unprepare      = clk_rpm_unprepare,
@@ -348,6 +400,45 @@ static const struct clk_ops clk_rpm_branch_ops = {
        .recalc_rate    = clk_rpm_recalc_rate,
 };
 
+/* MSM8660/APQ8060 */
+DEFINE_CLK_RPM(msm8660, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(msm8660, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(msm8660, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
+DEFINE_CLK_RPM(msm8660, smi_clk, smi_a_clk, QCOM_RPM_SMI_CLK);
+DEFINE_CLK_RPM(msm8660, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM_FIXED(msm8660, pll4_clk, pll4_a_clk, QCOM_RPM_PLL_4, 540672000);
+
+static struct clk_rpm *msm8660_clks[] = {
+       [RPM_APPS_FABRIC_CLK] = &msm8660_afab_clk,
+       [RPM_APPS_FABRIC_A_CLK] = &msm8660_afab_a_clk,
+       [RPM_SYS_FABRIC_CLK] = &msm8660_sfab_clk,
+       [RPM_SYS_FABRIC_A_CLK] = &msm8660_sfab_a_clk,
+       [RPM_MM_FABRIC_CLK] = &msm8660_mmfab_clk,
+       [RPM_MM_FABRIC_A_CLK] = &msm8660_mmfab_a_clk,
+       [RPM_DAYTONA_FABRIC_CLK] = &msm8660_daytona_clk,
+       [RPM_DAYTONA_FABRIC_A_CLK] = &msm8660_daytona_a_clk,
+       [RPM_SFPB_CLK] = &msm8660_sfpb_clk,
+       [RPM_SFPB_A_CLK] = &msm8660_sfpb_a_clk,
+       [RPM_CFPB_CLK] = &msm8660_cfpb_clk,
+       [RPM_CFPB_A_CLK] = &msm8660_cfpb_a_clk,
+       [RPM_MMFPB_CLK] = &msm8660_mmfpb_clk,
+       [RPM_MMFPB_A_CLK] = &msm8660_mmfpb_a_clk,
+       [RPM_SMI_CLK] = &msm8660_smi_clk,
+       [RPM_SMI_A_CLK] = &msm8660_smi_a_clk,
+       [RPM_EBI1_CLK] = &msm8660_ebi1_clk,
+       [RPM_EBI1_A_CLK] = &msm8660_ebi1_a_clk,
+       [RPM_PLL4_CLK] = &msm8660_pll4_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_msm8660 = {
+       .clks = msm8660_clks,
+       .num_clks = ARRAY_SIZE(msm8660_clks),
+};
+
 /* apq8064 */
 DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
 DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
@@ -386,6 +477,8 @@ static const struct rpm_clk_desc rpm_clk_apq8064 = {
 };
 
 static const struct of_device_id rpm_clk_match_table[] = {
+       { .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
+       { .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
        { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
        { }
 };
index cc03d5508627b3ee11cf17eb8762b94f21740f16..c26d9007bfc41c5c21053b3bd2ff9d5ba4dc2a29 100644 (file)
@@ -530,9 +530,91 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
        .clks = msm8974_clks,
        .num_clks = ARRAY_SIZE(msm8974_clks),
 };
+
+/* msm8996 */
+DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8996, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8996, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+                  QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre1_noc_clk, aggre1_noc_a_clk,
+                         QCOM_SMD_RPM_AGGR_CLK, 1, 1000);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre2_noc_clk, aggre2_noc_a_clk,
+                         QCOM_SMD_RPM_AGGR_CLK, 2, 1000);
+DEFINE_CLK_SMD_RPM_QDSS(msm8996, qdss_clk, qdss_a_clk,
+                       QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, ln_bb_clk, ln_bb_a_clk, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk1, div_clk1_a, 0xb);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk2, div_clk2_a, 0xc);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk3, div_clk3_a, 0xd);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8996_clks[] = {
+       [RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk,
+       [RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk,
+       [RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk,
+       [RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk,
+       [RPM_SMD_CNOC_CLK] = &msm8996_cnoc_clk,
+       [RPM_SMD_CNOC_A_CLK] = &msm8996_cnoc_a_clk,
+       [RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk,
+       [RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk,
+       [RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
+       [RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
+       [RPM_SMD_IPA_CLK] = &msm8996_ipa_clk,
+       [RPM_SMD_IPA_A_CLK] = &msm8996_ipa_a_clk,
+       [RPM_SMD_CE1_CLK] = &msm8996_ce1_clk,
+       [RPM_SMD_CE1_A_CLK] = &msm8996_ce1_a_clk,
+       [RPM_SMD_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
+       [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
+       [RPM_SMD_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
+       [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
+       [RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk,
+       [RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk,
+       [RPM_SMD_BB_CLK1] = &msm8996_bb_clk1,
+       [RPM_SMD_BB_CLK1_A] = &msm8996_bb_clk1_a,
+       [RPM_SMD_BB_CLK2] = &msm8996_bb_clk2,
+       [RPM_SMD_BB_CLK2_A] = &msm8996_bb_clk2_a,
+       [RPM_SMD_RF_CLK1] = &msm8996_rf_clk1,
+       [RPM_SMD_RF_CLK1_A] = &msm8996_rf_clk1_a,
+       [RPM_SMD_RF_CLK2] = &msm8996_rf_clk2,
+       [RPM_SMD_RF_CLK2_A] = &msm8996_rf_clk2_a,
+       [RPM_SMD_LN_BB_CLK] = &msm8996_ln_bb_clk,
+       [RPM_SMD_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk,
+       [RPM_SMD_DIV_CLK1] = &msm8996_div_clk1,
+       [RPM_SMD_DIV_A_CLK1] = &msm8996_div_clk1_a,
+       [RPM_SMD_DIV_CLK2] = &msm8996_div_clk2,
+       [RPM_SMD_DIV_A_CLK2] = &msm8996_div_clk2_a,
+       [RPM_SMD_DIV_CLK3] = &msm8996_div_clk3,
+       [RPM_SMD_DIV_A_CLK3] = &msm8996_div_clk3_a,
+       [RPM_SMD_BB_CLK1_PIN] = &msm8996_bb_clk1_pin,
+       [RPM_SMD_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin,
+       [RPM_SMD_BB_CLK2_PIN] = &msm8996_bb_clk2_pin,
+       [RPM_SMD_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin,
+       [RPM_SMD_RF_CLK1_PIN] = &msm8996_rf_clk1_pin,
+       [RPM_SMD_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin,
+       [RPM_SMD_RF_CLK2_PIN] = &msm8996_rf_clk2_pin,
+       [RPM_SMD_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
+       .clks = msm8996_clks,
+       .num_clks = ARRAY_SIZE(msm8996_clks),
+};
+
 static const struct of_device_id rpm_smd_clk_match_table[] = {
        { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
        { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
+       { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
        { }
 };
 MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
index d523991c945f9d4cbcd047c8a7edbb405d0e85ba..b8064a336d464bbe9aa253c39ebf0c3283c2ee54 100644 (file)
@@ -111,16 +111,6 @@ qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
 }
 EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
 
-static void qcom_cc_del_clk_provider(void *data)
-{
-       of_clk_del_provider(data);
-}
-
-static void qcom_cc_reset_unregister(void *data)
-{
-       reset_controller_unregister(data);
-}
-
 static void qcom_cc_gdsc_unregister(void *data)
 {
        gdsc_unregister(data);
@@ -143,8 +133,10 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
        int ret;
 
        clocks_node = of_find_node_by_path("/clocks");
-       if (clocks_node)
-               node = of_find_node_by_name(clocks_node, path);
+       if (clocks_node) {
+               node = of_get_child_by_name(clocks_node, path);
+               of_node_put(clocks_node);
+       }
 
        if (!node) {
                fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
@@ -248,13 +240,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
                        return ret;
        }
 
-       ret = of_clk_add_hw_provider(dev->of_node, qcom_cc_clk_hw_get, cc);
-       if (ret)
-               return ret;
-
-       ret = devm_add_action_or_reset(dev, qcom_cc_del_clk_provider,
-                                      pdev->dev.of_node);
-
+       ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
        if (ret)
                return ret;
 
@@ -266,13 +252,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
        reset->regmap = regmap;
        reset->reset_map = desc->resets;
 
-       ret = reset_controller_register(&reset->rcdev);
-       if (ret)
-               return ret;
-
-       ret = devm_add_action_or_reset(dev, qcom_cc_reset_unregister,
-                                      &reset->rcdev);
-
+       ret = devm_reset_controller_register(dev, &reset->rcdev);
        if (ret)
                return ret;
 
index acbb38151ba1c51aa57bdd3663ae31db23d1bab8..43b5a89c4b282db8fc12ce2a7279cc05fece19b4 100644 (file)
@@ -15,6 +15,7 @@ config CLK_RENESAS
        select CLK_R8A7794 if ARCH_R8A7794
        select CLK_R8A7795 if ARCH_R8A7795
        select CLK_R8A7796 if ARCH_R8A7796
+       select CLK_R8A77970 if ARCH_R8A77970
        select CLK_R8A77995 if ARCH_R8A77995
        select CLK_SH73A0 if ARCH_SH73A0
 
@@ -95,6 +96,10 @@ config CLK_R8A7796
        bool "R-Car M3-W clock support" if COMPILE_TEST
        select CLK_RCAR_GEN3_CPG
 
+config CLK_R8A77970
+       bool "R-Car V3M clock support" if COMPILE_TEST
+       select CLK_RCAR_GEN3_CPG
+
 config CLK_R8A77995
        bool "R-Car D3 clock support" if COMPILE_TEST
        select CLK_RCAR_GEN3_CPG
index cbbb081e2145e20ab25480461ff412a4387fdb4b..34c4e0b37afa0c081c8340e577da03506226fee6 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_CLK_R8A7792)             += r8a7792-cpg-mssr.o
 obj-$(CONFIG_CLK_R8A7794)              += r8a7794-cpg-mssr.o
 obj-$(CONFIG_CLK_R8A7795)              += r8a7795-cpg-mssr.o
 obj-$(CONFIG_CLK_R8A7796)              += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77970)             += r8a77970-cpg-mssr.o
 obj-$(CONFIG_CLK_R8A77995)             += r8a77995-cpg-mssr.o
 obj-$(CONFIG_CLK_SH73A0)               += clk-sh73a0.o
 
index 3e0040c0ac87a14bdd0b6a44c6abeee3ef5a1c14..151336d2ba59e689c2b83bb1255000581cd25043 100644 (file)
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/notifier.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/pm.h>
 #include <linux/slab.h>
 
 #include "clk-div6.h"
@@ -32,6 +34,7 @@
  * @src_shift: Shift to access the register bits to select the parent clock
  * @src_width: Number of register bits to select the parent clock (may be 0)
  * @parents: Array to map from valid parent clocks indices to hardware indices
+ * @nb: Notifier block to save/restore clock state for system resume
  */
 struct div6_clock {
        struct clk_hw hw;
@@ -40,6 +43,7 @@ struct div6_clock {
        u32 src_shift;
        u32 src_width;
        u8 *parents;
+       struct notifier_block nb;
 };
 
 #define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
@@ -176,6 +180,29 @@ static const struct clk_ops cpg_div6_clock_ops = {
        .set_rate = cpg_div6_clock_set_rate,
 };
 
+static int cpg_div6_clock_notifier_call(struct notifier_block *nb,
+                                       unsigned long action, void *data)
+{
+       struct div6_clock *clock = container_of(nb, struct div6_clock, nb);
+
+       switch (action) {
+       case PM_EVENT_RESUME:
+               /*
+                * TODO: This does not yet support DIV6 clocks with multiple
+                * parents, as the parent selection bits are not restored.
+                * Fortunately so far such DIV6 clocks are found only on
+                * R/SH-Mobile SoCs, while the resume functionality is only
+                * needed on R-Car Gen3.
+                */
+               if (__clk_get_enable_count(clock->hw.clk))
+                       cpg_div6_clock_enable(&clock->hw);
+               else
+                       cpg_div6_clock_disable(&clock->hw);
+               return NOTIFY_OK;
+       }
+
+       return NOTIFY_DONE;
+}
 
 /**
  * cpg_div6_register - Register a DIV6 clock
@@ -183,11 +210,13 @@ static const struct clk_ops cpg_div6_clock_ops = {
  * @num_parents: Number of parent clocks of the DIV6 clock (1, 4, or 8)
  * @parent_names: Array containing the names of the parent clocks
  * @reg: Mapped register used to control the DIV6 clock
+ * @notifiers: Optional notifier chain to save/restore state for system resume
  */
 struct clk * __init cpg_div6_register(const char *name,
                                      unsigned int num_parents,
                                      const char **parent_names,
-                                     void __iomem *reg)
+                                     void __iomem *reg,
+                                     struct raw_notifier_head *notifiers)
 {
        unsigned int valid_parents;
        struct clk_init_data init;
@@ -258,6 +287,11 @@ struct clk * __init cpg_div6_register(const char *name,
        if (IS_ERR(clk))
                goto free_parents;
 
+       if (notifiers) {
+               clock->nb.notifier_call = cpg_div6_clock_notifier_call;
+               raw_notifier_chain_register(notifiers, &clock->nb);
+       }
+
        return clk;
 
 free_parents:
@@ -301,7 +335,7 @@ static void __init cpg_div6_clock_init(struct device_node *np)
        for (i = 0; i < num_parents; i++)
                parent_names[i] = of_clk_get_parent_name(np, i);
 
-       clk = cpg_div6_register(clk_name, num_parents, parent_names, reg);
+       clk = cpg_div6_register(clk_name, num_parents, parent_names, reg, NULL);
        if (IS_ERR(clk)) {
                pr_err("%s: failed to register %s DIV6 clock (%ld)\n",
                       __func__, np->name, PTR_ERR(clk));
index 065dfb49adf6fb10170422b56298e47809372e78..3af640a0b08dc81e01b53f50ac36a85e16dae4e3 100644 (file)
@@ -3,6 +3,7 @@
 #define __RENESAS_CLK_DIV6_H__
 
 struct clk *cpg_div6_register(const char *name, unsigned int num_parents,
-                             const char **parent_names, void __iomem *reg);
+                             const char **parent_names, void __iomem *reg,
+                             struct raw_notifier_head *notifiers);
 
 #endif
index 500a9e4e03c489579c5e056a0f27cd2100ec3e05..c944cc421e3086434a2fd59233850b55bfd19b7f 100644 (file)
@@ -156,10 +156,8 @@ static struct clk * __init cpg_mstp_clock_register(const char *name,
        struct clk *clk;
 
        clock = kzalloc(sizeof(*clock), GFP_KERNEL);
-       if (!clock) {
-               pr_err("%s: failed to allocate MSTP clock.\n", __func__);
+       if (!clock)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &cpg_mstp_clock_ops;
@@ -196,7 +194,6 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
        if (group == NULL || clks == NULL) {
                kfree(group);
                kfree(clks);
-               pr_err("%s: failed to allocate group\n", __func__);
                return;
        }
 
index 0b2e56d0d94bb04c633cdebc1bef916e67d9d732..d14cbe1ca29ac0098610b1364b8fefb5ae07027d 100644 (file)
@@ -423,7 +423,6 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
                /* We're leaking memory on purpose, there's no point in cleaning
                 * up as the system won't boot anyway.
                 */
-               pr_err("%s: failed to allocate cpg\n", __func__);
                return;
        }
 
index 5adb934326d1f5be4725bc645dd13f6e3c494d7d..127c58135c8fec76b812b186b4f0d6d670ca828f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * rz Core CPG Clocks
+ * RZ/A1 Core CPG Clocks
  *
  * Copyright (C) 2013 Ideas On Board SPRL
  * Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
index 9e2360a8e14b860ec4a3a5e9982c370b772be178..2859504cc8668199f69f8bd32c4ae3d9c5bdfe3f 100644 (file)
@@ -129,6 +129,7 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
        DEF_MOD("scif2",                 719,   R8A7745_CLK_P),
        DEF_MOD("scif1",                 720,   R8A7745_CLK_P),
        DEF_MOD("scif0",                 721,   R8A7745_CLK_P),
+       DEF_MOD("du1",                   723,   R8A7745_CLK_ZX),
        DEF_MOD("du0",                   724,   R8A7745_CLK_ZX),
        DEF_MOD("ipmmu-sgx",             800,   R8A7745_CLK_ZX),
        DEF_MOD("vin1",                  810,   R8A7745_CLK_ZG),
index 762b2f8824f118deb1a41e0f24ab5281b5179712..b1d9f48eae9e6ad492d1bc7b634ef4f32c9fd341 100644 (file)
@@ -149,7 +149,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
        DEF_MOD("usb-dmac1",             331,   R8A7795_CLK_S3D1),
        DEF_MOD("rwdt",                  402,   R8A7795_CLK_R),
        DEF_MOD("intc-ex",               407,   R8A7795_CLK_CP),
-       DEF_MOD("intc-ap",               408,   R8A7795_CLK_S3D1),
+       DEF_MOD("intc-ap",               408,   R8A7795_CLK_S0D3),
        DEF_MOD("audmac1",               501,   R8A7795_CLK_S0D3),
        DEF_MOD("audmac0",               502,   R8A7795_CLK_S0D3),
        DEF_MOD("drif7",                 508,   R8A7795_CLK_S3D2),
@@ -348,6 +348,7 @@ static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
        { MOD_CLK_ID(217), R8A7795_CLK_S3D1 },  /* SYS-DMAC2 */
        { MOD_CLK_ID(218), R8A7795_CLK_S3D1 },  /* SYS-DMAC1 */
        { MOD_CLK_ID(219), R8A7795_CLK_S3D1 },  /* SYS-DMAC0 */
+       { MOD_CLK_ID(408), R8A7795_CLK_S3D1 },  /* INTC-AP */
        { MOD_CLK_ID(501), R8A7795_CLK_S3D1 },  /* AUDMAC1 */
        { MOD_CLK_ID(502), R8A7795_CLK_S3D1 },  /* AUDMAC0 */
        { MOD_CLK_ID(523), R8A7795_CLK_S3D4 },  /* PWM */
index e5e7fb212288c3779dfa09ed0f001ee3f77b4408..b3767472088ac0965ef41fe13a6c2c1bde41bf2e 100644 (file)
@@ -143,7 +143,7 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
        DEF_MOD("usb-dmac1",             331,   R8A7796_CLK_S3D1),
        DEF_MOD("rwdt",                  402,   R8A7796_CLK_R),
        DEF_MOD("intc-ex",               407,   R8A7796_CLK_CP),
-       DEF_MOD("intc-ap",               408,   R8A7796_CLK_S3D1),
+       DEF_MOD("intc-ap",               408,   R8A7796_CLK_S0D3),
        DEF_MOD("audmac1",               501,   R8A7796_CLK_S0D3),
        DEF_MOD("audmac0",               502,   R8A7796_CLK_S0D3),
        DEF_MOD("drif7",                 508,   R8A7796_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
new file mode 100644 (file)
index 0000000..72f9852
--- /dev/null
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -0,0 +1,199 @@
+/*
+ * r8a77970 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2017 Cogent Embedded Inc.
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a77970-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+       /* Core Clock Outputs exported to DT */
+       LAST_DT_CORE_CLK = R8A77970_CLK_OSC,
+
+       /* External Input Clocks */
+       CLK_EXTAL,
+       CLK_EXTALR,
+
+       /* Internal Core Clocks */
+       CLK_MAIN,
+       CLK_PLL0,
+       CLK_PLL1,
+       CLK_PLL3,
+       CLK_PLL1_DIV2,
+       CLK_PLL1_DIV4,
+
+       /* Module Clocks */
+       MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77970_core_clks[] __initconst = {
+       /* External Clock Inputs */
+       DEF_INPUT("extal",      CLK_EXTAL),
+       DEF_INPUT("extalr",     CLK_EXTALR),
+
+       /* Internal Core Clocks */
+       DEF_BASE(".main",       CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+       DEF_BASE(".pll0",       CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+       DEF_BASE(".pll1",       CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+       DEF_BASE(".pll3",       CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+
+       DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2,  CLK_PLL1,       2, 1),
+       DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4,  CLK_PLL1_DIV2,  2, 1),
+
+       /* Core Clock Outputs */
+       DEF_FIXED("ztr",        R8A77970_CLK_ZTR,   CLK_PLL1_DIV2,  6, 1),
+       DEF_FIXED("ztrd2",      R8A77970_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+       DEF_FIXED("zt",         R8A77970_CLK_ZT,    CLK_PLL1_DIV2,  4, 1),
+       DEF_FIXED("zx",         R8A77970_CLK_ZX,    CLK_PLL1_DIV2,  3, 1),
+       DEF_FIXED("s1d1",       R8A77970_CLK_S1D1,  CLK_PLL1_DIV2,  4, 1),
+       DEF_FIXED("s1d2",       R8A77970_CLK_S1D2,  CLK_PLL1_DIV2,  8, 1),
+       DEF_FIXED("s1d4",       R8A77970_CLK_S1D4,  CLK_PLL1_DIV2, 16, 1),
+       DEF_FIXED("s2d1",       R8A77970_CLK_S2D1,  CLK_PLL1_DIV2,  6, 1),
+       DEF_FIXED("s2d2",       R8A77970_CLK_S2D2,  CLK_PLL1_DIV2, 12, 1),
+       DEF_FIXED("s2d4",       R8A77970_CLK_S2D4,  CLK_PLL1_DIV2, 24, 1),
+
+       DEF_FIXED("cl",         R8A77970_CLK_CL,    CLK_PLL1_DIV2, 48, 1),
+       DEF_FIXED("cp",         R8A77970_CLK_CP,    CLK_EXTAL,      2, 1),
+
+       DEF_DIV6P1("canfd",     R8A77970_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+       DEF_DIV6P1("mso",       R8A77970_CLK_MSO,   CLK_PLL1_DIV4, 0x014),
+       DEF_DIV6P1("csi0",      R8A77970_CLK_CSI0,  CLK_PLL1_DIV4, 0x00c),
+
+       DEF_FIXED("osc",        R8A77970_CLK_OSC,   CLK_PLL1_DIV2, 12*1024, 1),
+       DEF_FIXED("r",          R8A77970_CLK_R,     CLK_EXTALR,    1, 1),
+};
+
+static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = {
+       DEF_MOD("ivcp1e",                127,   R8A77970_CLK_S2D1),
+       DEF_MOD("scif4",                 203,   R8A77970_CLK_S2D4),
+       DEF_MOD("scif3",                 204,   R8A77970_CLK_S2D4),
+       DEF_MOD("scif1",                 206,   R8A77970_CLK_S2D4),
+       DEF_MOD("scif0",                 207,   R8A77970_CLK_S2D4),
+       DEF_MOD("msiof3",                208,   R8A77970_CLK_MSO),
+       DEF_MOD("msiof2",                209,   R8A77970_CLK_MSO),
+       DEF_MOD("msiof1",                210,   R8A77970_CLK_MSO),
+       DEF_MOD("msiof0",                211,   R8A77970_CLK_MSO),
+       DEF_MOD("mfis",                  213,   R8A77970_CLK_S2D2),
+       DEF_MOD("sys-dmac2",             217,   R8A77970_CLK_S2D1),
+       DEF_MOD("sys-dmac1",             218,   R8A77970_CLK_S2D1),
+       DEF_MOD("rwdt",                  402,   R8A77970_CLK_R),
+       DEF_MOD("intc-ex",               407,   R8A77970_CLK_CP),
+       DEF_MOD("intc-ap",               408,   R8A77970_CLK_S2D1),
+       DEF_MOD("hscif3",                517,   R8A77970_CLK_S2D1),
+       DEF_MOD("hscif2",                518,   R8A77970_CLK_S2D1),
+       DEF_MOD("hscif1",                519,   R8A77970_CLK_S2D1),
+       DEF_MOD("hscif0",                520,   R8A77970_CLK_S2D1),
+       DEF_MOD("thermal",               522,   R8A77970_CLK_CP),
+       DEF_MOD("pwm",                   523,   R8A77970_CLK_S2D4),
+       DEF_MOD("fcpvd0",                603,   R8A77970_CLK_S2D1),
+       DEF_MOD("vspd0",                 623,   R8A77970_CLK_S2D1),
+       DEF_MOD("csi40",                 716,   R8A77970_CLK_CSI0),
+       DEF_MOD("du0",                   724,   R8A77970_CLK_S2D1),
+       DEF_MOD("vin3",                  808,   R8A77970_CLK_S2D1),
+       DEF_MOD("vin2",                  809,   R8A77970_CLK_S2D1),
+       DEF_MOD("vin1",                  810,   R8A77970_CLK_S2D1),
+       DEF_MOD("vin0",                  811,   R8A77970_CLK_S2D1),
+       DEF_MOD("etheravb",              812,   R8A77970_CLK_S2D2),
+       DEF_MOD("gpio5",                 907,   R8A77970_CLK_CP),
+       DEF_MOD("gpio4",                 908,   R8A77970_CLK_CP),
+       DEF_MOD("gpio3",                 909,   R8A77970_CLK_CP),
+       DEF_MOD("gpio2",                 910,   R8A77970_CLK_CP),
+       DEF_MOD("gpio1",                 911,   R8A77970_CLK_CP),
+       DEF_MOD("gpio0",                 912,   R8A77970_CLK_CP),
+       DEF_MOD("can-fd",                914,   R8A77970_CLK_S2D2),
+       DEF_MOD("i2c4",                  927,   R8A77970_CLK_S2D2),
+       DEF_MOD("i2c3",                  928,   R8A77970_CLK_S2D2),
+       DEF_MOD("i2c2",                  929,   R8A77970_CLK_S2D2),
+       DEF_MOD("i2c1",                  930,   R8A77970_CLK_S2D2),
+       DEF_MOD("i2c0",                  931,   R8A77970_CLK_S2D2),
+};
+
+static const unsigned int r8a77970_crit_mod_clks[] __initconst = {
+       MOD_CLK_ID(408),        /* INTC-AP (GIC) */
+};
+
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ *   MD                EXTAL           PLL0    PLL1    PLL3
+ * 14 13 19    (MHz)
+ *-------------------------------------------------
+ * 0  0  0     16.66 x 1       x192    x192    x96
+ * 0  0  1     16.66 x 1       x192    x192    x80
+ * 0  1  0     20    x 1       x160    x160    x80
+ * 0  1  1     20    x 1       x160    x160    x66
+ * 1  0  0     27    / 2       x236    x236    x118
+ * 1  0  1     27    / 2       x236    x236    x98
+ * 1  1  0     33.33 / 2       x192    x192    x96
+ * 1  1  1     33.33 / 2       x192    x192    x80
+ */
+#define CPG_PLL_CONFIG_INDEX(md)       ((((md) & BIT(14)) >> 12) | \
+                                        (((md) & BIT(13)) >> 12) | \
+                                        (((md) & BIT(19)) >> 19))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[8] __initconst = {
+       /* EXTAL div    PLL1 mult/div   PLL3 mult/div */
+       { 1,            192,    1,      96,     1,      },
+       { 1,            192,    1,      80,     1,      },
+       { 1,            160,    1,      80,     1,      },
+       { 1,            160,    1,      66,     1,      },
+       { 2,            236,    1,      118,    1,      },
+       { 2,            236,    1,      98,     1,      },
+       { 2,            192,    1,      96,     1,      },
+       { 2,            192,    1,      80,     1,      },
+};
+
+static int __init r8a77970_cpg_mssr_init(struct device *dev)
+{
+       const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+       u32 cpg_mode;
+       int error;
+
+       error = rcar_rst_read_mode_pins(&cpg_mode);
+       if (error)
+               return error;
+
+       cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+       return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77970_cpg_mssr_info __initconst = {
+       /* Core Clocks */
+       .core_clks = r8a77970_core_clks,
+       .num_core_clks = ARRAY_SIZE(r8a77970_core_clks),
+       .last_dt_core_clk = LAST_DT_CORE_CLK,
+       .num_total_core_clks = MOD_CLK_BASE,
+
+       /* Module Clocks */
+       .mod_clks = r8a77970_mod_clks,
+       .num_mod_clks = ARRAY_SIZE(r8a77970_mod_clks),
+       .num_hw_mod_clks = 12 * 32,
+
+       /* Critical Module Clocks */
+       .crit_mod_clks = r8a77970_crit_mod_clks,
+       .num_crit_mod_clks = ARRAY_SIZE(r8a77970_crit_mod_clks),
+
+       /* Callbacks */
+       .init = r8a77970_cpg_mssr_init,
+       .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
index e594cf8ee63b64e0b297c623ca31551b06d9fe31..ea4cafbe6e851aca89c24f79b4912b1a2278d774 100644 (file)
@@ -127,7 +127,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
        DEF_MOD("usb-dmac1",             331,   R8A77995_CLK_S3D1),
        DEF_MOD("rwdt",                  402,   R8A77995_CLK_R),
        DEF_MOD("intc-ex",               407,   R8A77995_CLK_CP),
-       DEF_MOD("intc-ap",               408,   R8A77995_CLK_S3D1),
+       DEF_MOD("intc-ap",               408,   R8A77995_CLK_S1D2),
        DEF_MOD("audmac0",               502,   R8A77995_CLK_S3D1),
        DEF_MOD("hscif3",                517,   R8A77995_CLK_S3D1C),
        DEF_MOD("hscif0",                520,   R8A77995_CLK_S3D1C),
index 123b1e622179308eb4080a00ae5a46472e318d28..feb14579a71b3bf47f38039315553b179d8716f7 100644 (file)
@@ -262,10 +262,9 @@ static unsigned int cpg_pll0_div __initdata;
 static u32 cpg_mode __initdata;
 
 struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
-                                              const struct cpg_core_clk *core,
-                                              const struct cpg_mssr_info *info,
-                                              struct clk **clks,
-                                              void __iomem *base)
+       const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+       struct clk **clks, void __iomem *base,
+       struct raw_notifier_head *notifiers)
 {
        const struct clk_div_table *table = NULL;
        const struct clk *parent;
index 9eba07ff8b11e20eaac35acd8651b9f615664d85..020a3baad0154231fb397792912fed327f39a0c1 100644 (file)
@@ -34,9 +34,9 @@ struct rcar_gen2_cpg_pll_config {
 };
 
 struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
-                                      const struct cpg_core_clk *core,
-                                      const struct cpg_mssr_info *info,
-                                      struct clk **clks, void __iomem *base);
+       const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+       struct clk **clks, void __iomem *base,
+       struct raw_notifier_head *notifiers);
 int rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
                       unsigned int pll0_div, u32 mode);
 
index 9511058165475dd7c8563aaa1409d1dd5ada5cc1..0904886f55015a3bdf309f7bdb38add0fd08a7e3 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/sys_soc.h>
 
 #define CPG_PLL2CR             0x002c
 #define CPG_PLL4CR             0x01f4
 
+struct cpg_simple_notifier {
+       struct notifier_block nb;
+       void __iomem *reg;
+       u32 saved;
+};
+
+static int cpg_simple_notifier_call(struct notifier_block *nb,
+                                   unsigned long action, void *data)
+{
+       struct cpg_simple_notifier *csn =
+               container_of(nb, struct cpg_simple_notifier, nb);
+
+       switch (action) {
+       case PM_EVENT_SUSPEND:
+               csn->saved = readl(csn->reg);
+               return NOTIFY_OK;
+
+       case PM_EVENT_RESUME:
+               writel(csn->saved, csn->reg);
+               return NOTIFY_OK;
+       }
+       return NOTIFY_DONE;
+}
+
+static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+                                        struct cpg_simple_notifier *csn)
+{
+       csn->nb.notifier_call = cpg_simple_notifier_call;
+       raw_notifier_chain_register(notifiers, &csn->nb);
+}
 
 /*
  * SDn Clock
@@ -55,8 +86,8 @@ struct sd_div_table {
 
 struct sd_clock {
        struct clk_hw hw;
-       void __iomem *reg;
        const struct sd_div_table *div_table;
+       struct cpg_simple_notifier csn;
        unsigned int div_num;
        unsigned int div_min;
        unsigned int div_max;
@@ -97,12 +128,12 @@ static const struct sd_div_table cpg_sd_div_table[] = {
 static int cpg_sd_clock_enable(struct clk_hw *hw)
 {
        struct sd_clock *clock = to_sd_clock(hw);
-       u32 val = readl(clock->reg);
+       u32 val = readl(clock->csn.reg);
 
        val &= ~(CPG_SD_STP_MASK);
        val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK;
 
-       writel(val, clock->reg);
+       writel(val, clock->csn.reg);
 
        return 0;
 }
@@ -111,14 +142,14 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
 {
        struct sd_clock *clock = to_sd_clock(hw);
 
-       writel(readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
+       writel(readl(clock->csn.reg) | CPG_SD_STP_MASK, clock->csn.reg);
 }
 
 static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
 {
        struct sd_clock *clock = to_sd_clock(hw);
 
-       return !(readl(clock->reg) & CPG_SD_STP_MASK);
+       return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
 }
 
 static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
@@ -170,10 +201,10 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
 
        clock->cur_div_idx = i;
 
-       val = readl(clock->reg);
+       val = readl(clock->csn.reg);
        val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
        val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
-       writel(val, clock->reg);
+       writel(val, clock->csn.reg);
 
        return 0;
 }
@@ -188,8 +219,8 @@ static const struct clk_ops cpg_sd_clock_ops = {
 };
 
 static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
-                                              void __iomem *base,
-                                              const char *parent_name)
+       void __iomem *base, const char *parent_name,
+       struct raw_notifier_head *notifiers)
 {
        struct clk_init_data init;
        struct sd_clock *clock;
@@ -207,12 +238,12 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
        init.parent_names = &parent_name;
        init.num_parents = 1;
 
-       clock->reg = base + core->offset;
+       clock->csn.reg = base + core->offset;
        clock->hw.init = &init;
        clock->div_table = cpg_sd_div_table;
        clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
 
-       sd_fc = readl(clock->reg) & CPG_SD_FC_MASK;
+       sd_fc = readl(clock->csn.reg) & CPG_SD_FC_MASK;
        for (i = 0; i < clock->div_num; i++)
                if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
                        break;
@@ -233,8 +264,13 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
 
        clk = clk_register(NULL, &clock->hw);
        if (IS_ERR(clk))
-               kfree(clock);
+               goto free_clock;
 
+       cpg_simple_notifier_register(notifiers, &clock->csn);
+       return clk;
+
+free_clock:
+       kfree(clock);
        return clk;
 }
 
@@ -265,7 +301,8 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
 
 struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
        const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
-       struct clk **clks, void __iomem *base)
+       struct clk **clks, void __iomem *base,
+       struct raw_notifier_head *notifiers)
 {
        const struct clk *parent;
        unsigned int mult = 1;
@@ -331,22 +368,32 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
                break;
 
        case CLK_TYPE_GEN3_SD:
-               return cpg_sd_clk_register(core, base, __clk_get_name(parent));
+               return cpg_sd_clk_register(core, base, __clk_get_name(parent),
+                                          notifiers);
 
        case CLK_TYPE_GEN3_R:
                if (cpg_quirks & RCKCR_CKSEL) {
+                       struct cpg_simple_notifier *csn;
+
+                       csn = kzalloc(sizeof(*csn), GFP_KERNEL);
+                       if (!csn)
+                               return ERR_PTR(-ENOMEM);
+
+                       csn->reg = base + CPG_RCKCR;
+
                        /*
                         * RINT is default.
                         * Only if EXTALR is populated, we switch to it.
                         */
-                       value = readl(base + CPG_RCKCR) & 0x3f;
+                       value = readl(csn->reg) & 0x3f;
 
                        if (clk_get_rate(clks[cpg_clk_extalr])) {
                                parent = clks[cpg_clk_extalr];
                                value |= BIT(15);
                        }
 
-                       writel(value, base + CPG_RCKCR);
+                       writel(value, csn->reg);
+                       cpg_simple_notifier_register(notifiers, csn);
                        break;
                }
 
index d756ef8b78eb6c02d9fee43dd6670cda36df53bb..2e4284399f530f96cf249238a5beb813408d8d34 100644 (file)
@@ -44,7 +44,8 @@ struct rcar_gen3_cpg_pll_config {
 
 struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
        const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
-       struct clk **clks, void __iomem *base);
+       struct clk **clks, void __iomem *base,
+       struct raw_notifier_head *notifiers);
 int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
                       unsigned int clk_extalr, u32 mode);
 
index e580a5e6346c2533ab34dd12cda0dcabe7369476..e3d03ffea4bc2fd6e17cf9268f797ade839ca750 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_clock.h>
 #include <linux/pm_domain.h>
+#include <linux/psci.h>
 #include <linux/reset-controller.h>
 #include <linux/slab.h>
 
@@ -106,6 +107,9 @@ static const u16 srcr[] = {
  * @num_core_clks: Number of Core Clocks in clks[]
  * @num_mod_clks: Number of Module Clocks in clks[]
  * @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @notifiers: Notifier chain to save/restore clock state for system resume
+ * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
+ * @smstpcr_saved[].val: Saved values of SMSTPCR[]
  */
 struct cpg_mssr_priv {
 #ifdef CONFIG_RESET_CONTROLLER
@@ -119,6 +123,12 @@ struct cpg_mssr_priv {
        unsigned int num_core_clks;
        unsigned int num_mod_clks;
        unsigned int last_dt_core_clk;
+
+       struct raw_notifier_head notifiers;
+       struct {
+               u32 mask;
+               u32 val;
+       } smstpcr_saved[ARRAY_SIZE(smstpcr)];
 };
 
 
@@ -293,7 +303,8 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
 
                if (core->type == CLK_TYPE_DIV6P1) {
                        clk = cpg_div6_register(core->name, 1, &parent_name,
-                                               priv->base + core->offset);
+                                               priv->base + core->offset,
+                                               &priv->notifiers);
                } else {
                        clk = clk_register_fixed_factor(NULL, core->name,
                                                        parent_name, 0,
@@ -304,7 +315,8 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
        default:
                if (info->cpg_clk_register)
                        clk = info->cpg_clk_register(dev, core, info,
-                                                    priv->clks, priv->base);
+                                                    priv->clks, priv->base,
+                                                    &priv->notifiers);
                else
                        dev_err(dev, "%s has unsupported core clock type %u\n",
                                core->name, core->type);
@@ -382,6 +394,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
 
        dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk);
        priv->clks[id] = clk;
+       priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
        return;
 
 fail:
@@ -680,6 +693,12 @@ static const struct of_device_id cpg_mssr_match[] = {
                .data = &r8a7796_cpg_mssr_info,
        },
 #endif
+#ifdef CONFIG_CLK_R8A77970
+       {
+               .compatible = "renesas,r8a77970-cpg-mssr",
+               .data = &r8a77970_cpg_mssr_info,
+       },
+#endif
 #ifdef CONFIG_CLK_R8A77995
        {
                .compatible = "renesas,r8a77995-cpg-mssr",
@@ -694,6 +713,85 @@ static void cpg_mssr_del_clk_provider(void *data)
        of_clk_del_provider(data);
 }
 
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
+static int cpg_mssr_suspend_noirq(struct device *dev)
+{
+       struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
+       unsigned int reg;
+
+       /* This is the best we can do to check for the presence of PSCI */
+       if (!psci_ops.cpu_suspend)
+               return 0;
+
+       /* Save module registers with bits under our control */
+       for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
+               if (priv->smstpcr_saved[reg].mask)
+                       priv->smstpcr_saved[reg].val =
+                               readl(priv->base + SMSTPCR(reg));
+       }
+
+       /* Save core clocks */
+       raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);
+
+       return 0;
+}
+
+static int cpg_mssr_resume_noirq(struct device *dev)
+{
+       struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
+       unsigned int reg, i;
+       u32 mask, oldval, newval;
+
+       /* This is the best we can do to check for the presence of PSCI */
+       if (!psci_ops.cpu_suspend)
+               return 0;
+
+       /* Restore core clocks */
+       raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);
+
+       /* Restore module clocks */
+       for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
+               mask = priv->smstpcr_saved[reg].mask;
+               if (!mask)
+                       continue;
+
+               oldval = readl(priv->base + SMSTPCR(reg));
+               newval = oldval & ~mask;
+               newval |= priv->smstpcr_saved[reg].val & mask;
+               if (newval == oldval)
+                       continue;
+
+               writel(newval, priv->base + SMSTPCR(reg));
+
+               /* Wait until enabled clocks are really enabled */
+               mask &= ~priv->smstpcr_saved[reg].val;
+               if (!mask)
+                       continue;
+
+               for (i = 1000; i > 0; --i) {
+                       oldval = readl(priv->base + MSTPSR(reg));
+                       if (!(oldval & mask))
+                               break;
+                       cpu_relax();
+               }
+
+               if (!i)
+                       dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n",
+                                priv->base + SMSTPCR(reg), oldval & mask);
+       }
+
+       return 0;
+}
+
+static const struct dev_pm_ops cpg_mssr_pm = {
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
+                                     cpg_mssr_resume_noirq)
+};
+#define DEV_PM_OPS     &cpg_mssr_pm
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
+
 static int __init cpg_mssr_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -729,10 +827,12 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
        if (!clks)
                return -ENOMEM;
 
+       dev_set_drvdata(dev, priv);
        priv->clks = clks;
        priv->num_core_clks = info->num_total_core_clks;
        priv->num_mod_clks = info->num_hw_mod_clks;
        priv->last_dt_core_clk = info->last_dt_core_clk;
+       RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
 
        for (i = 0; i < nclks; i++)
                clks[i] = ERR_PTR(-ENOENT);
@@ -769,6 +869,7 @@ static struct platform_driver cpg_mssr_driver = {
        .driver         = {
                .name   = "renesas-cpg-mssr",
                .of_match_table = cpg_mssr_match,
+               .pm = DEV_PM_OPS,
        },
 };
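Aside — a minimal sketch of the masked save/restore idea used by the new cpg_mssr_suspend_noirq()/cpg_mssr_resume_noirq() handlers above, reduced to plain C. The names (reg_read, reg_write, NREGS, saved[]) are hypothetical stand-ins, not part of this commit; the point is that only bits covered by each mask are snapshotted and rewritten, so clock bits owned by firmware or another bus master are left untouched across suspend.

    #include <stdint.h>

    #define NREGS 12    /* hypothetical count of SMSTPCR-like registers */

    struct saved_reg {
            uint32_t mask;  /* bits under this driver's control */
            uint32_t val;   /* snapshot taken at suspend time */
    };

    static struct saved_reg saved[NREGS];

    static void save_masked(uint32_t (*reg_read)(unsigned int idx))
    {
            unsigned int i;

            for (i = 0; i < NREGS; i++)
                    if (saved[i].mask)
                            saved[i].val = reg_read(i);
    }

    static void restore_masked(uint32_t (*reg_read)(unsigned int idx),
                               void (*reg_write)(unsigned int idx, uint32_t val))
    {
            unsigned int i;

            for (i = 0; i < NREGS; i++) {
                    uint32_t mask = saved[i].mask;
                    uint32_t cur, next;

                    if (!mask)
                            continue;

                    cur = reg_read(i);
                    next = (cur & ~mask) | (saved[i].val & mask);
                    if (next != cur)
                            reg_write(i, next);
            }
    }

The real resume handler additionally polls MSTPSR after the write and warns if a re-enabled clock fails to report ready within a bounded number of iterations.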
 
index 94b9071d1061ab16dd18268c399bb1576a0041a8..0745b09303082ef9bf65288bca5dac66ae0b00c1 100644 (file)
@@ -127,7 +127,8 @@ struct cpg_mssr_info {
        struct clk *(*cpg_clk_register)(struct device *dev,
                                        const struct cpg_core_clk *core,
                                        const struct cpg_mssr_info *info,
-                                       struct clk **clks, void __iomem *base);
+                                       struct clk **clks, void __iomem *base,
+                                       struct raw_notifier_head *notifiers);
 };
 
 extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
@@ -138,6 +139,7 @@ extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
 extern const struct cpg_mssr_info r8a7794_cpg_mssr_info;
 extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
 extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77970_cpg_mssr_info;
 extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
 
 
index 0e09684d43a5b15cf441f06fa12145abeb0234a2..32c19c0f1e141f95bc364f8695a87d85d90ac6bf 100644 (file)
@@ -322,8 +322,6 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
                                             sizeof(*rates) * nrates,
                                             GFP_KERNEL);
                if (!cpuclk->rate_table) {
-                       pr_err("%s: could not allocate memory for cpuclk rates\n",
-                              __func__);
                        ret = -ENOMEM;
                        goto unregister_notifier;
                }
index 00ad0e5f8d6661ee767be492cb35a407ea83b873..67e73fd71f095c9b1164e26a5422c4d8f5edd6ee 100644 (file)
@@ -290,15 +290,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
                        RK2928_CLKSEL_CON(0), 6, 2, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        div_core_peri_t, RK2928_CLKGATE_CON(0), 0, GFLAGS),
 
-       COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_p, 0,
+       COMPOSITE(ACLK_VEPU, "aclk_vepu", mux_pll_src_cpll_gpll_p, 0,
                        RK2928_CLKSEL_CON(32), 7, 1, MFLAGS, 0, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 9, GFLAGS),
-       GATE(0, "hclk_vepu", "aclk_vepu", 0,
+       GATE(HCLK_VEPU, "hclk_vepu", "aclk_vepu", 0,
                        RK2928_CLKGATE_CON(3), 10, GFLAGS),
-       COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_p, 0,
+       COMPOSITE(ACLK_VDPU, "aclk_vdpu", mux_pll_src_cpll_gpll_p, 0,
                        RK2928_CLKSEL_CON(32), 15, 1, MFLAGS, 8, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 11, GFLAGS),
-       GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
+       GATE(HCLK_VDPU, "hclk_vdpu", "aclk_vdpu", 0,
                        RK2928_CLKGATE_CON(3), 12, GFLAGS),
 
        GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
@@ -644,13 +644,13 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
 
        GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
        GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
-       GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
+       GATE(HCLK_CIF1, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
        GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
 
        GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(5), 14, GFLAGS),
 
-       GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
+       GATE(ACLK_CIF1, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
 
        GATE(PCLK_TIMER1, "pclk_timer1", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 8, GFLAGS),
        GATE(PCLK_TIMER2, "pclk_timer2", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
index fc56565379dd46123c31982c691bd44dacd5912b..7c4d242f19c1003b55cc6eee69af7ff503bfe820 100644 (file)
@@ -711,7 +711,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        GATE(PCLK_SIM, "pclk_sim", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 8, GFLAGS),
        GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 6, GFLAGS),
        GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 5, GFLAGS),
-       GATE(0, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS),
+       GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS),
        GATE(0, "pclk_efuse_1024", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 0, GFLAGS),
 
        /*
index 23835001e8bdb5007efc9b1e66cd8c9557fd6de1..ef8900bc077f60c78b2a097c9cd89457973229db 100644 (file)
@@ -6,6 +6,7 @@
 obj-$(CONFIG_COMMON_CLK)       += clk.o clk-pll.o clk-cpu.o
 obj-$(CONFIG_SOC_EXYNOS3250)   += clk-exynos3250.o
 obj-$(CONFIG_ARCH_EXYNOS4)     += clk-exynos4.o
+obj-$(CONFIG_ARCH_EXYNOS4)     += clk-exynos4412-isp.o
 obj-$(CONFIG_SOC_EXYNOS5250)   += clk-exynos5250.o
 obj-$(CONFIG_SOC_EXYNOS5260)   += clk-exynos5260.o
 obj-$(CONFIG_SOC_EXYNOS5410)   += clk-exynos5410.o
index 6686e8ba61f9f768681810b4ceaa3afcb5ff43b6..d2c99d8916b83e48c1b23d6c49dd98f43f81f2db 100644 (file)
@@ -457,8 +457,6 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
 
        cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
        if (!cpuclk->cfg) {
-               pr_err("%s: could not allocate memory for cpuclk data\n",
-                               __func__);
                ret = -ENOMEM;
                goto unregister_clk_nb;
        }
index b117783ed40478b03be6abe4ac1b107df8d7a424..5bfc92ee3129a5fec4df14c58f7024a64ed15058 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 
 #include <dt-bindings/clock/exynos-audss-clk.h>
 
@@ -36,14 +37,13 @@ static struct clk *epll;
 #define ASS_CLK_DIV 0x4
 #define ASS_CLK_GATE 0x8
 
-#ifdef CONFIG_PM_SLEEP
 static unsigned long reg_save[][2] = {
        { ASS_CLK_SRC,  0 },
        { ASS_CLK_DIV,  0 },
        { ASS_CLK_GATE, 0 },
 };
 
-static int exynos_audss_clk_suspend(struct device *dev)
+static int __maybe_unused exynos_audss_clk_suspend(struct device *dev)
 {
        int i;
 
@@ -53,7 +53,7 @@ static int exynos_audss_clk_suspend(struct device *dev)
        return 0;
 }
 
-static int exynos_audss_clk_resume(struct device *dev)
+static int __maybe_unused exynos_audss_clk_resume(struct device *dev)
 {
        int i;
 
@@ -62,7 +62,6 @@ static int exynos_audss_clk_resume(struct device *dev)
 
        return 0;
 }
-#endif /* CONFIG_PM_SLEEP */
 
 struct exynos_audss_clk_drvdata {
        unsigned int has_adma_clk:1;
@@ -135,6 +134,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
        const struct exynos_audss_clk_drvdata *variant;
        struct clk_hw **clk_table;
        struct resource *res;
+       struct device *dev = &pdev->dev;
        int i, ret = 0;
 
        variant = of_device_get_match_data(&pdev->dev);
@@ -142,15 +142,15 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
                return -EINVAL;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       reg_base = devm_ioremap_resource(&pdev->dev, res);
+       reg_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(reg_base)) {
-               dev_err(&pdev->dev, "failed to map audss registers\n");
+               dev_err(dev, "failed to map audss registers\n");
                return PTR_ERR(reg_base);
        }
 
        epll = ERR_PTR(-ENODEV);
 
-       clk_data = devm_kzalloc(&pdev->dev,
+       clk_data = devm_kzalloc(dev,
                                sizeof(*clk_data) +
                                sizeof(*clk_data->hws) * EXYNOS_AUDSS_MAX_CLKS,
                                GFP_KERNEL);
@@ -160,8 +160,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
        clk_data->num = variant->num_clks;
        clk_table = clk_data->hws;
 
-       pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
-       pll_in = devm_clk_get(&pdev->dev, "pll_in");
+       pll_ref = devm_clk_get(dev, "pll_ref");
+       pll_in = devm_clk_get(dev, "pll_in");
        if (!IS_ERR(pll_ref))
                mout_audss_p[0] = __clk_get_name(pll_ref);
        if (!IS_ERR(pll_in)) {
@@ -172,88 +172,103 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
 
                        ret = clk_prepare_enable(epll);
                        if (ret) {
-                               dev_err(&pdev->dev,
+                               dev_err(dev,
                                        "failed to prepare the epll clock\n");
                                return ret;
                        }
                }
        }
-       clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(NULL, "mout_audss",
+
+       /*
+        * Enable runtime PM here to allow the clock core to use runtime PM
+        * for the registered clocks. Additionally, we increase the runtime
+        * PM usage count before registering the clocks, to prevent the
+        * clock core from runtime suspending the device.
+        */
+       pm_runtime_get_noresume(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(dev, "mout_audss",
                                mout_audss_p, ARRAY_SIZE(mout_audss_p),
                                CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
 
-       cdclk = devm_clk_get(&pdev->dev, "cdclk");
-       sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio");
+       cdclk = devm_clk_get(dev, "cdclk");
+       sclk_audio = devm_clk_get(dev, "sclk_audio");
        if (!IS_ERR(cdclk))
                mout_i2s_p[1] = __clk_get_name(cdclk);
        if (!IS_ERR(sclk_audio))
                mout_i2s_p[2] = __clk_get_name(sclk_audio);
-       clk_table[EXYNOS_MOUT_I2S] = clk_hw_register_mux(NULL, "mout_i2s",
+       clk_table[EXYNOS_MOUT_I2S] = clk_hw_register_mux(dev, "mout_i2s",
                                mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
                                CLK_SET_RATE_NO_REPARENT,
                                reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
 
-       clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(NULL, "dout_srp",
+       clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(dev, "dout_srp",
                                "mout_audss", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_DIV, 0, 4, 0, &lock);
 
-       clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(NULL,
+       clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(dev,
                                "dout_aud_bus", "dout_srp", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
 
-       clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(NULL, "dout_i2s",
+       clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(dev, "dout_i2s",
                                "mout_i2s", 0, reg_base + ASS_CLK_DIV, 8, 4, 0,
                                &lock);
 
-       clk_table[EXYNOS_SRP_CLK] = clk_hw_register_gate(NULL, "srp_clk",
+       clk_table[EXYNOS_SRP_CLK] = clk_hw_register_gate(dev, "srp_clk",
                                "dout_srp", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 0, 0, &lock);
 
-       clk_table[EXYNOS_I2S_BUS] = clk_hw_register_gate(NULL, "i2s_bus",
+       clk_table[EXYNOS_I2S_BUS] = clk_hw_register_gate(dev, "i2s_bus",
                                "dout_aud_bus", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 2, 0, &lock);
 
-       clk_table[EXYNOS_SCLK_I2S] = clk_hw_register_gate(NULL, "sclk_i2s",
+       clk_table[EXYNOS_SCLK_I2S] = clk_hw_register_gate(dev, "sclk_i2s",
                                "dout_i2s", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 3, 0, &lock);
 
-       clk_table[EXYNOS_PCM_BUS] = clk_hw_register_gate(NULL, "pcm_bus",
+       clk_table[EXYNOS_PCM_BUS] = clk_hw_register_gate(dev, "pcm_bus",
                                 "sclk_pcm", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 4, 0, &lock);
 
-       sclk_pcm_in = devm_clk_get(&pdev->dev, "sclk_pcm_in");
+       sclk_pcm_in = devm_clk_get(dev, "sclk_pcm_in");
        if (!IS_ERR(sclk_pcm_in))
                sclk_pcm_p = __clk_get_name(sclk_pcm_in);
-       clk_table[EXYNOS_SCLK_PCM] = clk_hw_register_gate(NULL, "sclk_pcm",
+       clk_table[EXYNOS_SCLK_PCM] = clk_hw_register_gate(dev, "sclk_pcm",
                                sclk_pcm_p, CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 5, 0, &lock);
 
        if (variant->has_adma_clk) {
-               clk_table[EXYNOS_ADMA] = clk_hw_register_gate(NULL, "adma",
+               clk_table[EXYNOS_ADMA] = clk_hw_register_gate(dev, "adma",
                                "dout_srp", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 9, 0, &lock);
        }
 
        for (i = 0; i < clk_data->num; i++) {
                if (IS_ERR(clk_table[i])) {
-                       dev_err(&pdev->dev, "failed to register clock %d\n", i);
+                       dev_err(dev, "failed to register clock %d\n", i);
                        ret = PTR_ERR(clk_table[i]);
                        goto unregister;
                }
        }
 
-       ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+       ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
                                     clk_data);
        if (ret) {
-               dev_err(&pdev->dev, "failed to add clock provider\n");
+               dev_err(dev, "failed to add clock provider\n");
                goto unregister;
        }
 
+       pm_runtime_put_sync(dev);
+
        return 0;
 
 unregister:
        exynos_audss_clk_teardown();
+       pm_runtime_put_sync(dev);
+       pm_runtime_disable(dev);
 
        if (!IS_ERR(epll))
                clk_disable_unprepare(epll);
@@ -266,6 +281,7 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
        of_clk_del_provider(pdev->dev.of_node);
 
        exynos_audss_clk_teardown();
+       pm_runtime_disable(&pdev->dev);
 
        if (!IS_ERR(epll))
                clk_disable_unprepare(epll);
@@ -274,8 +290,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
 }
 
 static const struct dev_pm_ops exynos_audss_clk_pm_ops = {
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_audss_clk_suspend,
-                                    exynos_audss_clk_resume)
+       SET_RUNTIME_PM_OPS(exynos_audss_clk_suspend, exynos_audss_clk_resume,
+                          NULL)
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                    pm_runtime_force_resume)
 };
 
 static struct platform_driver exynos_audss_clk_driver = {
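Aside — the comment added to exynos_audss_clk_probe() above describes a common ordering: take a runtime PM reference before registering clocks so the core cannot runtime-suspend the device mid-registration, then drop it once the provider is in place. A condensed, hypothetical sketch of that ordering (the example_* names are stand-ins for illustration, not APIs from this commit):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical hook standing in for the actual clk_hw_register_*() calls. */
    static int example_register_clocks(struct device *dev)
    {
            return 0;
    }

    static int example_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            int ret;

            /*
             * Hold a runtime PM reference across clock registration so the
             * device cannot be runtime-suspended halfway through.
             */
            pm_runtime_get_noresume(dev);
            pm_runtime_set_active(dev);
            pm_runtime_enable(dev);

            ret = example_register_clocks(dev);

            /* Drop the reference; runtime PM now manages the device. */
            pm_runtime_put_sync(dev);
            if (ret)
                    pm_runtime_disable(dev);

            return ret;
    }

This mirrors the get_noresume/set_active/enable sequence and the put_sync on both the success and error paths of the patched probe function.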
index a21aea062baed6716bcc937aaf149506cd1f65ad..f29fb582400508efbff99f2daa2a1804de378c86 100644 (file)
@@ -144,8 +144,6 @@ static void __init exynos4_clkout_init(struct device_node *node)
 }
 CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
                exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
-               exynos4_clkout_init);
 CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
                exynos4_clkout_init);
 CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
index d8d3cb67b4029ac58c905fcae097a2c6957a9f22..134f25f2a913861c2ce2a2f89ce5a5221e14a98c 100644 (file)
@@ -550,9 +550,8 @@ static const struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __
 
 /* list of mux clocks supported in all exynos4 soc's */
 static const struct samsung_mux_clock exynos4_mux_clks[] __initconst = {
-       MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
-                       CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0,
-                       "mout_apll"),
+       MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+                       CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
        MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
        MUX(0, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
        MUX(0, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
@@ -737,7 +736,7 @@ static const struct samsung_div_clock exynos4_div_clks[] __initconst = {
        DIV(0, "div_periph", "div_core2", DIV_CPU0, 12, 3),
        DIV(0, "div_atb", "mout_core", DIV_CPU0, 16, 3),
        DIV(0, "div_pclk_dbg", "div_atb", DIV_CPU0, 20, 3),
-       DIV(CLK_ARM_CLK, "div_core2", "div_core", DIV_CPU0, 28, 3),
+       DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
        DIV(0, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
        DIV(0, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
        DIV(0, "div_clkout_cpu", "mout_clkout_cpu", CLKOUT_CMU_CPU, 8, 6),
@@ -837,6 +836,12 @@ static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = {
        DIV(0, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
        DIV(0, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
        DIV(0, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
+       DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
+       DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+       DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
+};
+
+static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
        DIV_F(CLK_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
                                                CLK_GET_RATE_NOCACHE, 0),
        DIV_F(CLK_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
@@ -846,18 +851,10 @@ static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = {
                                                4, 3, CLK_GET_RATE_NOCACHE, 0),
        DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
                                                8, 3, CLK_GET_RATE_NOCACHE, 0),
-       DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
-       DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
-       DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
 };
 
 /* list of gate clocks supported in all exynos4 soc's */
 static const struct samsung_gate_clock exynos4_gate_clks[] __initconst = {
-       /*
-        * After all Exynos4 based platforms are migrated to use device tree,
-        * the device name and clock alias names specified below for some
-        * of the clocks can be removed.
-        */
        GATE(CLK_PPMULEFT, "ppmuleft", "aclk200", GATE_IP_LEFTBUS, 1, 0, 0),
        GATE(CLK_PPMURIGHT, "ppmuright", "aclk200", GATE_IP_RIGHTBUS, 1, 0, 0),
        GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
@@ -1147,6 +1144,13 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
                        0, 0),
        GATE(CLK_I2S0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
                        0, 0),
+       GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+       GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", GATE_IP_DMC, 24, 0, 0),
+       GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
+               0),
+};
+
+static struct samsung_gate_clock exynos4x12_isp_gate_clks[] = {
        GATE(CLK_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0,
                        CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(CLK_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1,
@@ -1199,24 +1203,6 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
                        CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
        GATE(CLK_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
                        CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
-       GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
-       GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", GATE_IP_DMC, 24, 0, 0),
-       GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
-               0),
-};
-
-static const struct samsung_clock_alias exynos4_aliases[] __initconst = {
-       ALIAS(CLK_MOUT_CORE, NULL, "moutcore"),
-       ALIAS(CLK_ARM_CLK, NULL, "armclk"),
-       ALIAS(CLK_SCLK_APLL, NULL, "mout_apll"),
-};
-
-static const struct samsung_clock_alias exynos4210_aliases[] __initconst = {
-       ALIAS(CLK_SCLK_MPLL, NULL, "mout_mpll"),
-};
-
-static const struct samsung_clock_alias exynos4x12_aliases[] __initconst = {
-       ALIAS(CLK_MOUT_MPLL_USER_C, NULL, "mout_mpll"),
 };
 
 /*
@@ -1355,14 +1341,14 @@ static const struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initconst =
 };
 
 static struct samsung_pll_clock exynos4210_plls[nr_plls] __initdata = {
-       [apll] = PLL_A(pll_4508, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-               APLL_LOCK, APLL_CON0, "fout_apll", NULL),
-       [mpll] = PLL_A(pll_4508, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
-               E4210_MPLL_LOCK, E4210_MPLL_CON0, "fout_mpll", NULL),
-       [epll] = PLL_A(pll_4600, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-               EPLL_LOCK, EPLL_CON0, "fout_epll", NULL),
-       [vpll] = PLL_A(pll_4650c, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
-               VPLL_LOCK, VPLL_CON0, "fout_vpll", NULL),
+       [apll] = PLL(pll_4508, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+               APLL_LOCK, APLL_CON0, NULL),
+       [mpll] = PLL(pll_4508, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+               E4210_MPLL_LOCK, E4210_MPLL_CON0, NULL),
+       [epll] = PLL(pll_4600, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+               EPLL_LOCK, EPLL_CON0, NULL),
+       [vpll] = PLL(pll_4650c, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
+               VPLL_LOCK, VPLL_CON0, NULL),
 };
 
 static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
@@ -1416,24 +1402,6 @@ static const struct exynos_cpuclk_cfg_data e4210_armclk_d[] __initconst = {
        {  0 },
 };
 
-static const struct exynos_cpuclk_cfg_data e4212_armclk_d[] __initconst = {
-       { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
-       { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
-       { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
-       { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
-       { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4210_CPU_DIV1(2, 4), },
-       { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4210_CPU_DIV1(2, 4), },
-       {  900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
-       {  800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
-       {  700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
-       {  600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
-       {  500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
-       {  400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
-       {  300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
-       {  200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4210_CPU_DIV1(2, 3), },
-       {  0 },
-};
-
 #define E4412_CPU_DIV1(cores, hpm, copy)                               \
                (((cores) << 8) | ((hpm) << 4) | ((copy) << 0))
 
@@ -1527,8 +1495,6 @@ static void __init exynos4_clk_init(struct device_node *np,
                        ARRAY_SIZE(exynos4210_div_clks));
                samsung_clk_register_gate(ctx, exynos4210_gate_clks,
                        ARRAY_SIZE(exynos4210_gate_clks));
-               samsung_clk_register_alias(ctx, exynos4210_aliases,
-                       ARRAY_SIZE(exynos4210_aliases));
                samsung_clk_register_fixed_factor(ctx,
                        exynos4210_fixed_factor_clks,
                        ARRAY_SIZE(exynos4210_fixed_factor_clks));
@@ -1537,32 +1503,31 @@ static void __init exynos4_clk_init(struct device_node *np,
                        e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d),
                        CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
        } else {
+               struct resource res;
+
                samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
                        ARRAY_SIZE(exynos4x12_mux_clks));
                samsung_clk_register_div(ctx, exynos4x12_div_clks,
                        ARRAY_SIZE(exynos4x12_div_clks));
                samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
                        ARRAY_SIZE(exynos4x12_gate_clks));
-               samsung_clk_register_alias(ctx, exynos4x12_aliases,
-                       ARRAY_SIZE(exynos4x12_aliases));
                samsung_clk_register_fixed_factor(ctx,
                        exynos4x12_fixed_factor_clks,
                        ARRAY_SIZE(exynos4x12_fixed_factor_clks));
-               if (of_machine_is_compatible("samsung,exynos4412")) {
-                       exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
-                               mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
-                               e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
-                               CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
-               } else {
-                       exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
-                               mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
-                               e4212_armclk_d, ARRAY_SIZE(e4212_armclk_d),
-                               CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+
+               of_address_to_resource(np, 0, &res);
+               if (resource_size(&res) > 0x18000) {
+                       samsung_clk_register_div(ctx, exynos4x12_isp_div_clks,
+                               ARRAY_SIZE(exynos4x12_isp_div_clks));
+                       samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks,
+                               ARRAY_SIZE(exynos4x12_isp_gate_clks));
                }
-       }
 
-       samsung_clk_register_alias(ctx, exynos4_aliases,
-                       ARRAY_SIZE(exynos4_aliases));
+               exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+                       mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+                       e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
+                       CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+       }
 
        if (soc == EXYNOS4X12)
                exynos4x12_core_down_clock();
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
new file mode 100644 (file)
index 0000000..d5f1ccb
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos4412 ISP module.
+*/
+
+#include <dt-bindings/clock/exynos4.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "clk.h"
+
+/* Exynos4x12-specific registers, which belong to the ISP power domain */
+#define E4X12_DIV_ISP0         0x0300
+#define E4X12_DIV_ISP1         0x0304
+#define E4X12_GATE_ISP0                0x0800
+#define E4X12_GATE_ISP1                0x0804
+
+/*
+ * Support for CMU save/restore across system suspends
+ */
+static struct samsung_clk_reg_dump *exynos4x12_save_isp;
+
+static const unsigned long exynos4x12_clk_isp_save[] __initconst = {
+       E4X12_DIV_ISP0,
+       E4X12_DIV_ISP1,
+       E4X12_GATE_ISP0,
+       E4X12_GATE_ISP1,
+};
+
+PNAME(mout_user_aclk400_mcuisp_p4x12) = { "fin_pll", "div_aclk400_mcuisp", };
+
+static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
+       DIV(CLK_ISP_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
+       DIV(CLK_ISP_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+       DIV(CLK_ISP_DIV_MCUISP0, "div_mcuisp0", "aclk400_mcuisp",
+           E4X12_DIV_ISP1, 4, 3),
+       DIV(CLK_ISP_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0",
+           E4X12_DIV_ISP1, 8, 3),
+       DIV(0, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
+};
+
+static struct samsung_gate_clock exynos4x12_isp_gate_clks[] = {
+       GATE(CLK_ISP_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0, 0, 0),
+       GATE(CLK_ISP_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1, 0, 0),
+       GATE(CLK_ISP_FIMC_FD, "fd", "aclk200", E4X12_GATE_ISP0, 2, 0, 0),
+       GATE(CLK_ISP_FIMC_LITE0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, 0, 0),
+       GATE(CLK_ISP_FIMC_LITE1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, 0, 0),
+       GATE(CLK_ISP_MCUISP, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, 0, 0),
+       GATE(CLK_ISP_GICISP, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, 0, 0),
+       GATE(CLK_ISP_SMMU_ISP, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, 0, 0),
+       GATE(CLK_ISP_SMMU_DRC, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, 0, 0),
+       GATE(CLK_ISP_SMMU_FD, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, 0, 0),
+       GATE(CLK_ISP_SMMU_LITE0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
+            0, 0),
+       GATE(CLK_ISP_SMMU_LITE1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
+            0, 0),
+       GATE(CLK_ISP_PPMUISPMX, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
+            0, 0),
+       GATE(CLK_ISP_PPMUISPX, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
+            0, 0),
+       GATE(CLK_ISP_MCUCTL_ISP, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
+            0, 0),
+       GATE(CLK_ISP_MPWM_ISP, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
+            0, 0),
+       GATE(CLK_ISP_I2C0_ISP, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
+            0, 0),
+       GATE(CLK_ISP_I2C1_ISP, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
+            0, 0),
+       GATE(CLK_ISP_MTCADC_ISP, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
+            0, 0),
+       GATE(CLK_ISP_PWM_ISP, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, 0, 0),
+       GATE(CLK_ISP_WDT_ISP, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, 0, 0),
+       GATE(CLK_ISP_UART_ISP, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
+            0, 0),
+       GATE(CLK_ISP_ASYNCAXIM, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
+            0, 0),
+       GATE(CLK_ISP_SMMU_ISPCX, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
+            0, 0),
+       GATE(CLK_ISP_SPI0_ISP, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
+            0, 0),
+       GATE(CLK_ISP_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
+            0, 0),
+};
+
+static int __maybe_unused exynos4x12_isp_clk_suspend(struct device *dev)
+{
+       struct samsung_clk_provider *ctx = dev_get_drvdata(dev);
+
+       samsung_clk_save(ctx->reg_base, exynos4x12_save_isp,
+                        ARRAY_SIZE(exynos4x12_clk_isp_save));
+       return 0;
+}
+
+static int __maybe_unused exynos4x12_isp_clk_resume(struct device *dev)
+{
+       struct samsung_clk_provider *ctx = dev_get_drvdata(dev);
+
+       samsung_clk_restore(ctx->reg_base, exynos4x12_save_isp,
+                           ARRAY_SIZE(exynos4x12_clk_isp_save));
+       return 0;
+}
+
+static int __init exynos4x12_isp_clk_probe(struct platform_device *pdev)
+{
+       struct samsung_clk_provider *ctx;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct resource *res;
+       void __iomem *reg_base;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       reg_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(reg_base)) {
+               dev_err(dev, "failed to map registers\n");
+               return PTR_ERR(reg_base);
+       }
+
+       exynos4x12_save_isp = samsung_clk_alloc_reg_dump(exynos4x12_clk_isp_save,
+                                       ARRAY_SIZE(exynos4x12_clk_isp_save));
+       if (!exynos4x12_save_isp)
+               return -ENOMEM;
+
+       ctx = samsung_clk_init(np, reg_base, CLK_NR_ISP_CLKS);
+       ctx->dev = dev;
+
+       platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+       pm_runtime_get_sync(dev);
+
+       samsung_clk_register_div(ctx, exynos4x12_isp_div_clks,
+                                ARRAY_SIZE(exynos4x12_isp_div_clks));
+       samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks,
+                                 ARRAY_SIZE(exynos4x12_isp_gate_clks));
+
+       samsung_clk_of_add_provider(np, ctx);
+       pm_runtime_put(dev);
+
+       return 0;
+}
+
+static const struct of_device_id exynos4x12_isp_clk_of_match[] = {
+       { .compatible = "samsung,exynos4412-isp-clock", },
+       { },
+};
+
+static const struct dev_pm_ops exynos4x12_isp_pm_ops = {
+       SET_RUNTIME_PM_OPS(exynos4x12_isp_clk_suspend,
+                          exynos4x12_isp_clk_resume, NULL)
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                    pm_runtime_force_resume)
+};
+
+static struct platform_driver exynos4x12_isp_clk_driver __refdata = {
+       .driver = {
+               .name = "exynos4x12-isp-clk",
+               .of_match_table = exynos4x12_isp_clk_of_match,
+               .suppress_bind_attrs = true,
+               .pm = &exynos4x12_isp_pm_ops,
+       },
+       .probe = exynos4x12_isp_clk_probe,
+};
+
+static int __init exynos4x12_isp_clk_init(void)
+{
+       return platform_driver_register(&exynos4x12_isp_clk_driver);
+}
+core_initcall(exynos4x12_isp_clk_init);
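Aside — the new exynos4412-isp clock driver keeps a register dump (exynos4x12_clk_isp_save) because the ISP power domain loses its DIV/GATE register contents when powered off; the runtime-suspend callback snapshots those offsets and the resume callback writes them back through the samsung_clk_save()/samsung_clk_restore() helpers shown above. Stripped of the kernel plumbing, the idea is just the following (dump_offset, dump_value and the flat MMIO base are invented names for illustration):

    #include <stdint.h>
    #include <stddef.h>

    #define NDUMP 4

    /* Byte offsets of registers to preserve while the power domain is off. */
    static const unsigned long dump_offset[NDUMP] = {
            0x0300, 0x0304, 0x0800, 0x0804,
    };
    static uint32_t dump_value[NDUMP];

    static void dump_save(volatile uint32_t *base)
    {
            size_t i;

            for (i = 0; i < NDUMP; i++)
                    dump_value[i] = base[dump_offset[i] / 4];
    }

    static void dump_restore(volatile uint32_t *base)
    {
            size_t i;

            for (i = 0; i < NDUMP; i++)
                    base[dump_offset[i] / 4] = dump_value[i];
    }

Pairing this with SET_RUNTIME_PM_OPS() plus pm_runtime_force_suspend()/pm_runtime_force_resume() for system sleep, as the driver does, lets one save/restore path cover both runtime PM and suspend-to-RAM.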
index 27a227d6620c7b32bedf04bf16f2f2a7f71ce49b..9b073c98a8910e02be6420454a6d198275de27fc 100644 (file)
@@ -293,14 +293,14 @@ static const struct samsung_mux_clock exynos5250_mux_clks[] __initconst = {
        /*
         * CMU_CPU
         */
-       MUX_FA(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
-                                       CLK_SET_RATE_PARENT, 0, "mout_apll"),
-       MUX_A(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
+       MUX_F(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+                                       CLK_SET_RATE_PARENT, 0),
+       MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
 
        /*
         * CMU_CORE
         */
-       MUX_A(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
+       MUX(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
 
        /*
         * CMU_TOP
@@ -391,7 +391,7 @@ static const struct samsung_div_clock exynos5250_div_clks[] __initconst = {
         */
        DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
        DIV(0, "div_apll", "mout_apll", DIV_CPU0, 24, 3),
-       DIV_A(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3, "armclk"),
+       DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
 
        /*
         * CMU_TOP
@@ -743,10 +743,10 @@ static const struct samsung_pll_rate_table apll_24mhz_tbl[] __initconst = {
 };
 
 static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
-       [apll] = PLL_A(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-               APLL_LOCK, APLL_CON0, "fout_apll", NULL),
-       [mpll] = PLL_A(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
-               MPLL_LOCK, MPLL_CON0, "fout_mpll", NULL),
+       [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
+               APLL_CON0, NULL),
+       [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK,
+               MPLL_CON0, NULL),
        [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
                BPLL_CON0, NULL),
        [gpll] = PLL(pll_35xx, CLK_FOUT_GPLL, "fout_gpll", "fin_pll", GPLL_LOCK,
index 25601967d1cd6da55c733e41f1057670ae7ff87e..45d34f601e9e0abfa5eac115272df833c4c86ce1 100644 (file)
@@ -600,8 +600,7 @@ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
                                TOP_SPARE2, 4, 1),
 
        MUX(0, "mout_aclk400_isp", mout_group1_p, SRC_TOP0, 0, 2),
-       MUX_A(0, "mout_aclk400_mscl", mout_group1_p,
-                               SRC_TOP0, 4, 2, "aclk400_mscl"),
+       MUX(0, "mout_aclk400_mscl", mout_group1_p, SRC_TOP0, 4, 2),
        MUX(0, "mout_aclk400_wcore", mout_group1_p, SRC_TOP0, 16, 2),
        MUX(0, "mout_aclk100_noc", mout_group1_p, SRC_TOP0, 20, 2),
 
@@ -998,7 +997,7 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
                        GATE_BUS_TOP, 16, 0, 0),
        GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
-                       GATE_BUS_TOP, 17, 0, 0),
+                       GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
                        GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
        GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
index 11343a5970933d14c490006727c9fc39a86e1a5e..db270908037abc097d1ec7b43a1a10677cc7eaed 100644 (file)
@@ -9,9 +9,13 @@
  * Common Clock Framework support for Exynos5433 SoC.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 
 #include <dt-bindings/clock/exynos5433.h>
 
@@ -1991,6 +1995,14 @@ static const unsigned long fsys_clk_regs[] __initconst = {
        ENABLE_IP_FSYS1,
 };
 
+static const struct samsung_clk_reg_dump fsys_suspend_regs[] = {
+       { MUX_SEL_FSYS0, 0 },
+       { MUX_SEL_FSYS1, 0 },
+       { MUX_SEL_FSYS2, 0 },
+       { MUX_SEL_FSYS3, 0 },
+       { MUX_SEL_FSYS4, 0 },
+};
+
 static const struct samsung_fixed_rate_clock fsys_fixed_clks[] __initconst = {
        /* PHY clocks from USBDRD30_PHY */
        FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY,
@@ -2296,16 +2308,11 @@ static const struct samsung_cmu_info fsys_cmu_info __initconst = {
        .nr_clk_ids             = FSYS_NR_CLK,
        .clk_regs               = fsys_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(fsys_clk_regs),
+       .suspend_regs           = fsys_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(fsys_suspend_regs),
+       .clk_name               = "aclk_fsys_200",
 };
 
-static void __init exynos5433_cmu_fsys_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &fsys_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_fsys, "samsung,exynos5433-cmu-fsys",
-               exynos5433_cmu_fsys_init);
-
 /*
  * Register offset definitions for CMU_G2D
  */
@@ -2335,6 +2342,10 @@ static const unsigned long g2d_clk_regs[] __initconst = {
        DIV_ENABLE_IP_G2D_SECURE_SMMU_G2D,
 };
 
+static const struct samsung_clk_reg_dump g2d_suspend_regs[] = {
+       { MUX_SEL_G2D0, 0 },
+};
+
 /* list of all parent clock list */
 PNAME(mout_aclk_g2d_266_user_p)                = { "oscclk", "aclk_g2d_266", };
 PNAME(mout_aclk_g2d_400_user_p)                = { "oscclk", "aclk_g2d_400", };
@@ -2420,16 +2431,11 @@ static const struct samsung_cmu_info g2d_cmu_info __initconst = {
        .nr_clk_ids             = G2D_NR_CLK,
        .clk_regs               = g2d_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(g2d_clk_regs),
+       .suspend_regs           = g2d_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(g2d_suspend_regs),
+       .clk_name               = "aclk_g2d_400",
 };
 
-static void __init exynos5433_cmu_g2d_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &g2d_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_g2d, "samsung,exynos5433-cmu-g2d",
-               exynos5433_cmu_g2d_init);
-
 /*
  * Register offset definitions for CMU_DISP
  */
@@ -2494,6 +2500,18 @@ static const unsigned long disp_clk_regs[] __initconst = {
        CLKOUT_CMU_DISP_DIV_STAT,
 };
 
+static const struct samsung_clk_reg_dump disp_suspend_regs[] = {
+       /* PLL has to be enabled for suspend */
+       { DISP_PLL_CON0, 0x85f40502 },
+       /* ignore status of external PHY muxes during suspend to avoid hangs */
+       { MUX_IGNORE_DISP2, 0x00111111 },
+       { MUX_SEL_DISP0, 0 },
+       { MUX_SEL_DISP1, 0 },
+       { MUX_SEL_DISP2, 0 },
+       { MUX_SEL_DISP3, 0 },
+       { MUX_SEL_DISP4, 0 },
+};
+
 /* list of all parent clock list */
 PNAME(mout_disp_pll_p)                 = { "oscclk", "fout_disp_pll", };
 PNAME(mout_sclk_dsim1_user_p)          = { "oscclk", "sclk_dsim1_disp", };
@@ -2841,16 +2859,11 @@ static const struct samsung_cmu_info disp_cmu_info __initconst = {
        .nr_clk_ids             = DISP_NR_CLK,
        .clk_regs               = disp_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(disp_clk_regs),
+       .suspend_regs           = disp_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(disp_suspend_regs),
+       .clk_name               = "aclk_disp_333",
 };
 
-static void __init exynos5433_cmu_disp_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &disp_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_disp, "samsung,exynos5433-cmu-disp",
-               exynos5433_cmu_disp_init);
-
 /*
  * Register offset definitions for CMU_AUD
  */
@@ -2885,6 +2898,11 @@ static const unsigned long aud_clk_regs[] __initconst = {
        ENABLE_IP_AUD1,
 };
 
+static const struct samsung_clk_reg_dump aud_suspend_regs[] = {
+       { MUX_SEL_AUD0, 0 },
+       { MUX_SEL_AUD1, 0 },
+};
+
 /* list of all parent clock list */
 PNAME(mout_aud_pll_user_aud_p) = { "oscclk", "fout_aud_pll", };
 PNAME(mout_sclk_aud_pcm_p)     = { "mout_aud_pll_user", "ioclk_audiocdclk0",};
@@ -3011,16 +3029,11 @@ static const struct samsung_cmu_info aud_cmu_info __initconst = {
        .nr_clk_ids             = AUD_NR_CLK,
        .clk_regs               = aud_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(aud_clk_regs),
+       .suspend_regs           = aud_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(aud_suspend_regs),
+       .clk_name               = "fout_aud_pll",
 };
 
-static void __init exynos5433_cmu_aud_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &aud_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_aud, "samsung,exynos5433-cmu-aud",
-               exynos5433_cmu_aud_init);
-
-
 /*
  * Register offset definitions for CMU_BUS{0|1|2}
  */
@@ -3222,6 +3235,10 @@ static const unsigned long g3d_clk_regs[] __initconst = {
        CLK_STOPCTRL,
 };
 
+static const struct samsung_clk_reg_dump g3d_suspend_regs[] = {
+       { MUX_SEL_G3D, 0 },
+};
+
 /* list of all parent clock list */
 PNAME(mout_aclk_g3d_400_p)     = { "mout_g3d_pll", "aclk_g3d_400", };
 PNAME(mout_g3d_pll_p)          = { "oscclk", "fout_g3d_pll", };
@@ -3295,15 +3312,11 @@ static const struct samsung_cmu_info g3d_cmu_info __initconst = {
        .nr_clk_ids             = G3D_NR_CLK,
        .clk_regs               = g3d_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(g3d_clk_regs),
+       .suspend_regs           = g3d_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(g3d_suspend_regs),
+       .clk_name               = "aclk_g3d_400",
 };
 
-static void __init exynos5433_cmu_g3d_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &g3d_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_g3d, "samsung,exynos5433-cmu-g3d",
-               exynos5433_cmu_g3d_init);
-
 /*
  * Register offset definitions for CMU_GSCL
  */
@@ -3342,6 +3355,12 @@ static const unsigned long gscl_clk_regs[] __initconst = {
        ENABLE_IP_GSCL_SECURE_SMMU_GSCL2,
 };
 
+static const struct samsung_clk_reg_dump gscl_suspend_regs[] = {
+       { MUX_SEL_GSCL, 0 },
+       { ENABLE_ACLK_GSCL, 0xfff },
+       { ENABLE_PCLK_GSCL, 0xff },
+};
+
 /* list of all parent clock list */
 PNAME(aclk_gscl_111_user_p)    = { "oscclk", "aclk_gscl_111", };
 PNAME(aclk_gscl_333_user_p)    = { "oscclk", "aclk_gscl_333", };
@@ -3436,15 +3455,11 @@ static const struct samsung_cmu_info gscl_cmu_info __initconst = {
        .nr_clk_ids             = GSCL_NR_CLK,
        .clk_regs               = gscl_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(gscl_clk_regs),
+       .suspend_regs           = gscl_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(gscl_suspend_regs),
+       .clk_name               = "aclk_gscl_111",
 };
 
-static void __init exynos5433_cmu_gscl_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &gscl_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_gscl, "samsung,exynos5433-cmu-gscl",
-               exynos5433_cmu_gscl_init);
-
 /*
  * Register offset definitions for CMU_APOLLO
  */
@@ -3970,6 +3985,11 @@ static const unsigned long mscl_clk_regs[] __initconst = {
        ENABLE_IP_MSCL_SECURE_SMMU_JPEG,
 };
 
+static const struct samsung_clk_reg_dump mscl_suspend_regs[] = {
+       { MUX_SEL_MSCL0, 0 },
+       { MUX_SEL_MSCL1, 0 },
+};
+
 /* list of all parent clock list */
 PNAME(mout_sclk_jpeg_user_p)           = { "oscclk", "sclk_jpeg_mscl", };
 PNAME(mout_aclk_mscl_400_user_p)       = { "oscclk", "aclk_mscl_400", };
@@ -4082,15 +4102,11 @@ static const struct samsung_cmu_info mscl_cmu_info __initconst = {
        .nr_clk_ids             = MSCL_NR_CLK,
        .clk_regs               = mscl_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(mscl_clk_regs),
+       .suspend_regs           = mscl_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(mscl_suspend_regs),
+       .clk_name               = "aclk_mscl_400",
 };
 
-static void __init exynos5433_cmu_mscl_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &mscl_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_mscl, "samsung,exynos5433-cmu-mscl",
-               exynos5433_cmu_mscl_init);
-
 /*
  * Register offset definitions for CMU_MFC
  */
@@ -4120,6 +4136,10 @@ static const unsigned long mfc_clk_regs[] __initconst = {
        ENABLE_IP_MFC_SECURE_SMMU_MFC,
 };
 
+static const struct samsung_clk_reg_dump mfc_suspend_regs[] = {
+       { MUX_SEL_MFC, 0 },
+};
+
 PNAME(mout_aclk_mfc_400_user_p)                = { "oscclk", "aclk_mfc_400", };
 
 static const struct samsung_mux_clock mfc_mux_clks[] __initconst = {
@@ -4190,15 +4210,11 @@ static const struct samsung_cmu_info mfc_cmu_info __initconst = {
        .nr_clk_ids             = MFC_NR_CLK,
        .clk_regs               = mfc_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(mfc_clk_regs),
+       .suspend_regs           = mfc_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(mfc_suspend_regs),
+       .clk_name               = "aclk_mfc_400",
 };
 
-static void __init exynos5433_cmu_mfc_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &mfc_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_mfc, "samsung,exynos5433-cmu-mfc",
-               exynos5433_cmu_mfc_init);
-
 /*
  * Register offset definitions for CMU_HEVC
  */
@@ -4228,6 +4244,10 @@ static const unsigned long hevc_clk_regs[] __initconst = {
        ENABLE_IP_HEVC_SECURE_SMMU_HEVC,
 };
 
+static const struct samsung_clk_reg_dump hevc_suspend_regs[] = {
+       { MUX_SEL_HEVC, 0 },
+};
+
 PNAME(mout_aclk_hevc_400_user_p)       = { "oscclk", "aclk_hevc_400", };
 
 static const struct samsung_mux_clock hevc_mux_clks[] __initconst = {
@@ -4300,15 +4320,11 @@ static const struct samsung_cmu_info hevc_cmu_info __initconst = {
        .nr_clk_ids             = HEVC_NR_CLK,
        .clk_regs               = hevc_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(hevc_clk_regs),
+       .suspend_regs           = hevc_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(hevc_suspend_regs),
+       .clk_name               = "aclk_hevc_400",
 };
 
-static void __init exynos5433_cmu_hevc_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &hevc_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_hevc, "samsung,exynos5433-cmu-hevc",
-               exynos5433_cmu_hevc_init);
-
 /*
  * Register offset definitions for CMU_ISP
  */
@@ -4342,6 +4358,10 @@ static const unsigned long isp_clk_regs[] __initconst = {
        ENABLE_IP_ISP3,
 };
 
+static const struct samsung_clk_reg_dump isp_suspend_regs[] = {
+       { MUX_SEL_ISP, 0 },
+};
+
 PNAME(mout_aclk_isp_dis_400_user_p)    = { "oscclk", "aclk_isp_dis_400", };
 PNAME(mout_aclk_isp_400_user_p)                = { "oscclk", "aclk_isp_400", };
 
@@ -4553,15 +4573,11 @@ static const struct samsung_cmu_info isp_cmu_info __initconst = {
        .nr_clk_ids             = ISP_NR_CLK,
        .clk_regs               = isp_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(isp_clk_regs),
+       .suspend_regs           = isp_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(isp_suspend_regs),
+       .clk_name               = "aclk_isp_400",
 };
 
-static void __init exynos5433_cmu_isp_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &isp_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_isp, "samsung,exynos5433-cmu-isp",
-               exynos5433_cmu_isp_init);
-
 /*
  * Register offset definitions for CMU_CAM0
  */
@@ -4625,6 +4641,15 @@ static const unsigned long cam0_clk_regs[] __initconst = {
        ENABLE_IP_CAM02,
        ENABLE_IP_CAM03,
 };
+
+static const struct samsung_clk_reg_dump cam0_suspend_regs[] = {
+       { MUX_SEL_CAM00, 0 },
+       { MUX_SEL_CAM01, 0 },
+       { MUX_SEL_CAM02, 0 },
+       { MUX_SEL_CAM03, 0 },
+       { MUX_SEL_CAM04, 0 },
+};
+
 PNAME(mout_aclk_cam0_333_user_p)       = { "oscclk", "aclk_cam0_333", };
 PNAME(mout_aclk_cam0_400_user_p)       = { "oscclk", "aclk_cam0_400", };
 PNAME(mout_aclk_cam0_552_user_p)       = { "oscclk", "aclk_cam0_552", };
@@ -5030,15 +5055,11 @@ static const struct samsung_cmu_info cam0_cmu_info __initconst = {
        .nr_clk_ids             = CAM0_NR_CLK,
        .clk_regs               = cam0_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(cam0_clk_regs),
+       .suspend_regs           = cam0_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(cam0_suspend_regs),
+       .clk_name               = "aclk_cam0_400",
 };
 
-static void __init exynos5433_cmu_cam0_init(struct device_node *np)
-{
-       samsung_cmu_register_one(np, &cam0_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_cam0, "samsung,exynos5433-cmu-cam0",
-               exynos5433_cmu_cam0_init);
-
 /*
  * Register offset definitions for CMU_CAM1
  */
@@ -5085,6 +5106,12 @@ static const unsigned long cam1_clk_regs[] __initconst = {
        ENABLE_IP_CAM12,
 };
 
+static const struct samsung_clk_reg_dump cam1_suspend_regs[] = {
+       { MUX_SEL_CAM10, 0 },
+       { MUX_SEL_CAM11, 0 },
+       { MUX_SEL_CAM12, 0 },
+};
+
 PNAME(mout_sclk_isp_uart_user_p)       = { "oscclk", "sclk_isp_uart_cam1", };
 PNAME(mout_sclk_isp_spi1_user_p)       = { "oscclk", "sclk_isp_spi1_cam1", };
 PNAME(mout_sclk_isp_spi0_user_p)       = { "oscclk", "sclk_isp_spi0_cam1", };
@@ -5403,11 +5430,223 @@ static const struct samsung_cmu_info cam1_cmu_info __initconst = {
        .nr_clk_ids             = CAM1_NR_CLK,
        .clk_regs               = cam1_clk_regs,
        .nr_clk_regs            = ARRAY_SIZE(cam1_clk_regs),
+       .suspend_regs           = cam1_suspend_regs,
+       .nr_suspend_regs        = ARRAY_SIZE(cam1_suspend_regs),
+       .clk_name               = "aclk_cam1_400",
+};
+
+
+struct exynos5433_cmu_data {
+       struct samsung_clk_reg_dump *clk_save;
+       unsigned int nr_clk_save;
+       const struct samsung_clk_reg_dump *clk_suspend;
+       unsigned int nr_clk_suspend;
+
+       struct clk *clk;
+       struct clk **pclks;
+       int nr_pclks;
+
+       /* must be the last entry */
+       struct samsung_clk_provider ctx;
+};
+
+static int __maybe_unused exynos5433_cmu_suspend(struct device *dev)
+{
+       struct exynos5433_cmu_data *data = dev_get_drvdata(dev);
+       int i;
+
+       samsung_clk_save(data->ctx.reg_base, data->clk_save,
+                        data->nr_clk_save);
+
+       for (i = 0; i < data->nr_pclks; i++)
+               clk_prepare_enable(data->pclks[i]);
+
+       /* for suspend, some registers have to be set to certain values */
+       samsung_clk_restore(data->ctx.reg_base, data->clk_suspend,
+                           data->nr_clk_suspend);
+
+       for (i = 0; i < data->nr_pclks; i++)
+               clk_disable_unprepare(data->pclks[i]);
+
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static int __maybe_unused exynos5433_cmu_resume(struct device *dev)
+{
+       struct exynos5433_cmu_data *data = dev_get_drvdata(dev);
+       int i;
+
+       clk_prepare_enable(data->clk);
+
+       for (i = 0; i < data->nr_pclks; i++)
+               clk_prepare_enable(data->pclks[i]);
+
+       samsung_clk_restore(data->ctx.reg_base, data->clk_save,
+                           data->nr_clk_save);
+
+       for (i = 0; i < data->nr_pclks; i++)
+               clk_disable_unprepare(data->pclks[i]);
+
+       return 0;
+}
+
+static int __init exynos5433_cmu_probe(struct platform_device *pdev)
+{
+       const struct samsung_cmu_info *info;
+       struct exynos5433_cmu_data *data;
+       struct samsung_clk_provider *ctx;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       void __iomem *reg_base;
+       int i;
+
+       info = of_device_get_match_data(dev);
+
+       data = devm_kzalloc(dev, sizeof(*data) +
+                           sizeof(*data->ctx.clk_data.hws) * info->nr_clk_ids,
+                           GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       ctx = &data->ctx;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       reg_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(reg_base)) {
+               dev_err(dev, "failed to map registers\n");
+               return PTR_ERR(reg_base);
+       }
+
+       for (i = 0; i < info->nr_clk_ids; ++i)
+               ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+
+       ctx->clk_data.num = info->nr_clk_ids;
+       ctx->reg_base = reg_base;
+       ctx->dev = dev;
+       spin_lock_init(&ctx->lock);
+
+       data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
+                                                   info->nr_clk_regs);
+       data->nr_clk_save = info->nr_clk_regs;
+       data->clk_suspend = info->suspend_regs;
+       data->nr_clk_suspend = info->nr_suspend_regs;
+       data->nr_pclks = of_count_phandle_with_args(dev->of_node, "clocks",
+                                                   "#clock-cells");
+       if (data->nr_pclks > 0) {
+               data->pclks = devm_kcalloc(dev, sizeof(struct clk *),
+                                          data->nr_pclks, GFP_KERNEL);
+
+               for (i = 0; i < data->nr_pclks; i++) {
+                       struct clk *clk = of_clk_get(dev->of_node, i);
+
+                       if (IS_ERR(clk))
+                               return PTR_ERR(clk);
+                       data->pclks[i] = clk;
+               }
+       }
+
+       if (info->clk_name)
+               data->clk = clk_get(dev, info->clk_name);
+       clk_prepare_enable(data->clk);
+
+       platform_set_drvdata(pdev, data);
+
+       /*
+        * Enable runtime PM here to allow the clock core to use runtime PM
+        * for the registered clocks. Additionally, we increase the runtime
+        * PM usage count before registering the clocks, to prevent the
+        * clock core from runtime suspending the device.
+        */
+       pm_runtime_get_noresume(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       if (info->pll_clks)
+               samsung_clk_register_pll(ctx, info->pll_clks, info->nr_pll_clks,
+                                        reg_base);
+       if (info->mux_clks)
+               samsung_clk_register_mux(ctx, info->mux_clks,
+                                        info->nr_mux_clks);
+       if (info->div_clks)
+               samsung_clk_register_div(ctx, info->div_clks,
+                                        info->nr_div_clks);
+       if (info->gate_clks)
+               samsung_clk_register_gate(ctx, info->gate_clks,
+                                         info->nr_gate_clks);
+       if (info->fixed_clks)
+               samsung_clk_register_fixed_rate(ctx, info->fixed_clks,
+                                               info->nr_fixed_clks);
+       if (info->fixed_factor_clks)
+               samsung_clk_register_fixed_factor(ctx, info->fixed_factor_clks,
+                                                 info->nr_fixed_factor_clks);
+
+       samsung_clk_of_add_provider(dev->of_node, ctx);
+       pm_runtime_put_sync(dev);
+
+       return 0;
+}
+
+static const struct of_device_id exynos5433_cmu_of_match[] = {
+       {
+               .compatible = "samsung,exynos5433-cmu-aud",
+               .data = &aud_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-cam0",
+               .data = &cam0_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-cam1",
+               .data = &cam1_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-disp",
+               .data = &disp_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-g2d",
+               .data = &g2d_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-g3d",
+               .data = &g3d_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-fsys",
+               .data = &fsys_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-gscl",
+               .data = &gscl_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-mfc",
+               .data = &mfc_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-hevc",
+               .data = &hevc_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-isp",
+               .data = &isp_cmu_info,
+       }, {
+               .compatible = "samsung,exynos5433-cmu-mscl",
+               .data = &mscl_cmu_info,
+       }, {
+       },
+};
+
+static const struct dev_pm_ops exynos5433_cmu_pm_ops = {
+       SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume,
+                          NULL)
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                    pm_runtime_force_resume)
+};
+
+static struct platform_driver exynos5433_cmu_driver __refdata = {
+       .driver = {
+               .name = "exynos5433-cmu",
+               .of_match_table = exynos5433_cmu_of_match,
+               .suppress_bind_attrs = true,
+               .pm = &exynos5433_cmu_pm_ops,
+       },
+       .probe = exynos5433_cmu_probe,
 };
 
-static void __init exynos5433_cmu_cam1_init(struct device_node *np)
+static int __init exynos5433_cmu_init(void)
 {
-       samsung_cmu_register_one(np, &cam1_cmu_info);
+       return platform_driver_register(&exynos5433_cmu_driver);
 }
-CLK_OF_DECLARE(exynos5433_cmu_cam1, "samsung,exynos5433-cmu-cam1",
-               exynos5433_cmu_cam1_init);
+core_initcall(exynos5433_cmu_init);
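The comment in exynos5433_cmu_probe() above spells out a general ordering rule for clock providers that use runtime PM: the usage count must be raised and the device marked active before any clock is registered, and the extra reference is only dropped once the provider has been published. A minimal sketch of that ordering, with a hypothetical register_all_clocks() helper standing in for the samsung_clk_register_*() calls (illustrative only, not this driver):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical helper standing in for the samsung_clk_register_*() calls. */
static void register_all_clocks(struct device *dev)
{
}

static int example_cmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/*
	 * Raise the usage count and mark the device active before any
	 * clock is registered, so the clock core cannot runtime-suspend
	 * the provider while its registers are still being programmed.
	 */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	register_all_clocks(dev);

	/* Drop the extra reference once the provider is fully published. */
	pm_runtime_put_sync(dev);

	return 0;
}
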
index a80f3ef208018758c5e5774abadc7b700ebf39da..b08bd54c5e766833b5355e69b6acd165731aef61 100644 (file)
@@ -53,8 +53,7 @@ static const struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __
 /* mux clocks */
 static const struct samsung_mux_clock exynos5440_mux_clks[] __initconst = {
        MUX(0, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
-       MUX_A(CLK_ARM_CLK, "arm_clk", mout_armclk_p,
-                       CPU_CLK_STATUS, 0, 1, "armclk"),
+       MUX(CLK_ARM_CLK, "arm_clk", mout_armclk_p, CPU_CLK_STATUS, 0, 1),
 };
 
 /* divider clocks */
@@ -117,6 +116,13 @@ static const struct samsung_pll_clock exynos5440_plls[] __initconst = {
        PLL(pll_2550x, CLK_CPLLB, "cpllb", "xtal", 0, 0x50, NULL),
 };
 
+/*
+ * Clock aliases for legacy clkdev look-up.
+ */
+static const struct samsung_clock_alias exynos5440_aliases[] __initconst = {
+       ALIAS(CLK_ARM_CLK, NULL, "armclk"),
+};
+
 /* register exynos5440 clocks */
 static void __init exynos5440_clk_init(struct device_node *np)
 {
@@ -147,6 +153,8 @@ static void __init exynos5440_clk_init(struct device_node *np)
                        ARRAY_SIZE(exynos5440_div_clks));
        samsung_clk_register_gate(ctx, exynos5440_gate_clks,
                        ARRAY_SIZE(exynos5440_gate_clks));
+       samsung_clk_register_alias(ctx, exynos5440_aliases,
+                                               ARRAY_SIZE(exynos5440_aliases));
 
        samsung_clk_of_add_provider(np, ctx);
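For legacy board code the lookup itself does not change: the clock is still resolved through clkdev by connection id, only the place where the alias is registered moves from the MUX_A()/PLL() entries into the common exynos5440_aliases[] table. A consumer-side sketch (hypothetical code, shown only to illustrate the clkdev lookup that the ALIAS() entries keep working):

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical consumer resolving the CPU clock through the "armclk"
 * alias that exynos5440_aliases[] registers for CLK_ARM_CLK. */
static unsigned long example_armclk_rate(void)
{
	struct clk *clk = clk_get(NULL, "armclk");
	unsigned long rate;

	if (IS_ERR(clk))
		return 0;

	rate = clk_get_rate(clk);
	clk_put(clk);

	return rate;
}
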
 
index 037c6148409872b46f35f9613244742fd52b3a09..1c4c7a3039f1bc53a5ccb1bc558fd304db50c677 100644 (file)
@@ -1388,7 +1388,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
        pll->lock_reg = base + pll_clk->lock_offset;
        pll->con_reg = base + pll_clk->con_offset;
 
-       ret = clk_hw_register(NULL, &pll->hw);
+       ret = clk_hw_register(ctx->dev, &pll->hw);
        if (ret) {
                pr_err("%s: failed to register pll clock %s : %d\n",
                        __func__, pll_clk->name, ret);
@@ -1397,15 +1397,6 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
        }
 
        samsung_clk_add_lookup(ctx, &pll->hw, pll_clk->id);
-
-       if (!pll_clk->alias)
-               return;
-
-       ret = clk_hw_register_clkdev(&pll->hw, pll_clk->alias,
-                                    pll_clk->dev_name);
-       if (ret)
-               pr_err("%s: failed to register lookup for %s : %d",
-                       __func__, pll_clk->name, ret);
 }
 
 void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
index abb935c4291699c957c242c5192512349e31524b..d94b85a4235604bfd8212d80925382acd7a9d9b3 100644 (file)
@@ -117,8 +117,8 @@ struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
        MUX(0, "epllref", epllref_p, CLKSRC, 7, 2),
        MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1),
        MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1),
-       MUX_A(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1, "msysclk"),
-       MUX_A(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1, "armclk"),
+       MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1),
+       MUX(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1),
        MUX(0, "mux_i2s0", i2s0_p, CLKSRC, 14, 2),
 };
 
@@ -189,6 +189,10 @@ struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
 };
 
 struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
+       ALIAS(MSYSCLK, NULL, "msysclk"),
+       ALIAS(ARMCLK, NULL, "armclk"),
+       ALIAS(MPLL, NULL, "mpll"),
+       ALIAS(EPLL, NULL, "epll"),
        ALIAS(HCLK, NULL, "hclk"),
        ALIAS(HCLK_SSMC, NULL, "nand"),
        ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
@@ -221,9 +225,9 @@ struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
 /* S3C2416 specific clocks */
 
 static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = {
-       [mpll] = PLL(pll_6552_s3c2416, 0, "mpll", "mpllref",
+       [mpll] = PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref",
                                                LOCKCON0, MPLLCON, NULL),
-       [epll] = PLL(pll_6553, 0, "epll", "epllref",
+       [epll] = PLL(pll_6553, EPLL, "epll", "epllref",
                                                LOCKCON1, EPLLCON, NULL),
 };
 
@@ -275,9 +279,9 @@ struct samsung_clock_alias s3c2416_aliases[] __initdata = {
 /* S3C2443 specific clocks */
 
 static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = {
-       [mpll] = PLL(pll_3000, 0, "mpll", "mpllref",
+       [mpll] = PLL(pll_3000, MPLL, "mpll", "mpllref",
                                                LOCKCON0, MPLLCON, NULL),
-       [epll] = PLL(pll_2126, 0, "epll", "epllref",
+       [epll] = PLL(pll_2126, EPLL, "epll", "epllref",
                                                LOCKCON1, EPLLCON, NULL),
 };
 
index 7ce0fa86c5ff821b0a2745e589b8f047b66b4e1b..8634884aa11ce421fb9a3485d6f1af2408bd5fd4 100644 (file)
@@ -134,7 +134,7 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
        unsigned int idx, ret;
 
        for (idx = 0; idx < nr_clk; idx++, list++) {
-               clk_hw = clk_hw_register_fixed_rate(NULL, list->name,
+               clk_hw = clk_hw_register_fixed_rate(ctx->dev, list->name,
                        list->parent_name, list->flags, list->fixed_rate);
                if (IS_ERR(clk_hw)) {
                        pr_err("%s: failed to register clock %s\n", __func__,
@@ -163,7 +163,7 @@ void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
        unsigned int idx;
 
        for (idx = 0; idx < nr_clk; idx++, list++) {
-               clk_hw = clk_hw_register_fixed_factor(NULL, list->name,
+               clk_hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
                        list->parent_name, list->flags, list->mult, list->div);
                if (IS_ERR(clk_hw)) {
                        pr_err("%s: failed to register clock %s\n", __func__,
@@ -181,10 +181,10 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
                                unsigned int nr_clk)
 {
        struct clk_hw *clk_hw;
-       unsigned int idx, ret;
+       unsigned int idx;
 
        for (idx = 0; idx < nr_clk; idx++, list++) {
-               clk_hw = clk_hw_register_mux(NULL, list->name,
+               clk_hw = clk_hw_register_mux(ctx->dev, list->name,
                        list->parent_names, list->num_parents, list->flags,
                        ctx->reg_base + list->offset,
                        list->shift, list->width, list->mux_flags, &ctx->lock);
@@ -195,15 +195,6 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
                }
 
                samsung_clk_add_lookup(ctx, clk_hw, list->id);
-
-               /* register a clock lookup only if a clock alias is specified */
-               if (list->alias) {
-                       ret = clk_hw_register_clkdev(clk_hw, list->alias,
-                                               list->dev_name);
-                       if (ret)
-                               pr_err("%s: failed to register lookup %s\n",
-                                               __func__, list->alias);
-               }
        }
 }
 
@@ -213,17 +204,17 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
                                unsigned int nr_clk)
 {
        struct clk_hw *clk_hw;
-       unsigned int idx, ret;
+       unsigned int idx;
 
        for (idx = 0; idx < nr_clk; idx++, list++) {
                if (list->table)
-                       clk_hw = clk_hw_register_divider_table(NULL,
+                       clk_hw = clk_hw_register_divider_table(ctx->dev,
                                list->name, list->parent_name, list->flags,
                                ctx->reg_base + list->offset,
                                list->shift, list->width, list->div_flags,
                                list->table, &ctx->lock);
                else
-                       clk_hw = clk_hw_register_divider(NULL, list->name,
+                       clk_hw = clk_hw_register_divider(ctx->dev, list->name,
                                list->parent_name, list->flags,
                                ctx->reg_base + list->offset, list->shift,
                                list->width, list->div_flags, &ctx->lock);
@@ -234,15 +225,6 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
                }
 
                samsung_clk_add_lookup(ctx, clk_hw, list->id);
-
-               /* register a clock lookup only if a clock alias is specified */
-               if (list->alias) {
-                       ret = clk_hw_register_clkdev(clk_hw, list->alias,
-                                               list->dev_name);
-                       if (ret)
-                               pr_err("%s: failed to register lookup %s\n",
-                                               __func__, list->alias);
-               }
        }
 }
 
@@ -252,10 +234,10 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
                                unsigned int nr_clk)
 {
        struct clk_hw *clk_hw;
-       unsigned int idx, ret;
+       unsigned int idx;
 
        for (idx = 0; idx < nr_clk; idx++, list++) {
-               clk_hw = clk_hw_register_gate(NULL, list->name, list->parent_name,
+               clk_hw = clk_hw_register_gate(ctx->dev, list->name, list->parent_name,
                                list->flags, ctx->reg_base + list->offset,
                                list->bit_idx, list->gate_flags, &ctx->lock);
                if (IS_ERR(clk_hw)) {
@@ -264,15 +246,6 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
                        continue;
                }
 
-               /* register a clock lookup only if a clock alias is specified */
-               if (list->alias) {
-                       ret = clk_hw_register_clkdev(clk_hw, list->alias,
-                                                       list->dev_name);
-                       if (ret)
-                               pr_err("%s: failed to register lookup %s\n",
-                                       __func__, list->alias);
-               }
-
                samsung_clk_add_lookup(ctx, clk_hw, list->id);
        }
 }
index b8ca0dd3a38b771f2b2af998ad19cfff5e216fde..3880d2f9d5829df357f0c4b9f9639bbfd171599d 100644 (file)
@@ -24,6 +24,7 @@
  */
 struct samsung_clk_provider {
        void __iomem *reg_base;
+       struct device *dev;
        spinlock_t lock;
        /* clk_data must be the last entry due to variable length 'hws' array */
        struct clk_hw_onecell_data clk_data;
@@ -106,7 +107,6 @@ struct samsung_fixed_factor_clock {
 /**
  * struct samsung_mux_clock: information about mux clock
  * @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
  * @name: name of this mux clock.
  * @parent_names: array of pointer to parent clock names.
  * @num_parents: number of parents listed in @parent_names.
@@ -115,11 +115,9 @@ struct samsung_fixed_factor_clock {
  * @shift: starting bit location of the mux control bit-field in @reg.
  * @width: width of the mux control bit-field in @reg.
  * @mux_flags: flags for mux-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
  */
 struct samsung_mux_clock {
        unsigned int            id;
-       const char              *dev_name;
        const char              *name;
        const char              *const *parent_names;
        u8                      num_parents;
@@ -128,13 +126,11 @@ struct samsung_mux_clock {
        u8                      shift;
        u8                      width;
        u8                      mux_flags;
-       const char              *alias;
 };
 
-#define __MUX(_id, dname, cname, pnames, o, s, w, f, mf, a)    \
+#define __MUX(_id, cname, pnames, o, s, w, f, mf)              \
        {                                                       \
                .id             = _id,                          \
-               .dev_name       = dname,                        \
                .name           = cname,                        \
                .parent_names   = pnames,                       \
                .num_parents    = ARRAY_SIZE(pnames),           \
@@ -143,36 +139,26 @@ struct samsung_mux_clock {
                .shift          = s,                            \
                .width          = w,                            \
                .mux_flags      = mf,                           \
-               .alias          = a,                            \
        }
 
 #define MUX(_id, cname, pnames, o, s, w)                       \
-       __MUX(_id, NULL, cname, pnames, o, s, w, 0, 0, NULL)
-
-#define MUX_A(_id, cname, pnames, o, s, w, a)                  \
-       __MUX(_id, NULL, cname, pnames, o, s, w, 0, 0, a)
+       __MUX(_id, cname, pnames, o, s, w, 0, 0)
 
 #define MUX_F(_id, cname, pnames, o, s, w, f, mf)              \
-       __MUX(_id, NULL, cname, pnames, o, s, w, f, mf, NULL)
-
-#define MUX_FA(_id, cname, pnames, o, s, w, f, mf, a)          \
-       __MUX(_id, NULL, cname, pnames, o, s, w, f, mf, a)
+       __MUX(_id, cname, pnames, o, s, w, f, mf)
 
 /**
  * @id: platform specific id of the clock.
  * struct samsung_div_clock: information about div clock
- * @dev_name: name of the device to which this clock belongs.
  * @name: name of this div clock.
  * @parent_name: name of the parent clock.
  * @flags: optional flags for basic clock.
  * @offset: offset of the register for configuring the div.
  * @shift: starting bit location of the div control bit-field in @reg.
  * @div_flags: flags for div-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
  */
 struct samsung_div_clock {
        unsigned int            id;
-       const char              *dev_name;
        const char              *name;
        const char              *parent_name;
        unsigned long           flags;
@@ -180,14 +166,12 @@ struct samsung_div_clock {
        u8                      shift;
        u8                      width;
        u8                      div_flags;
-       const char              *alias;
        struct clk_div_table    *table;
 };
 
-#define __DIV(_id, dname, cname, pname, o, s, w, f, df, a, t)  \
+#define __DIV(_id, cname, pname, o, s, w, f, df, t)    \
        {                                                       \
                .id             = _id,                          \
-               .dev_name       = dname,                        \
                .name           = cname,                        \
                .parent_name    = pname,                        \
                .flags          = f,                            \
@@ -195,70 +179,51 @@ struct samsung_div_clock {
                .shift          = s,                            \
                .width          = w,                            \
                .div_flags      = df,                           \
-               .alias          = a,                            \
                .table          = t,                            \
        }
 
 #define DIV(_id, cname, pname, o, s, w)                                \
-       __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, NULL, NULL)
-
-#define DIV_A(_id, cname, pname, o, s, w, a)                   \
-       __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, a, NULL)
+       __DIV(_id, cname, pname, o, s, w, 0, 0, NULL)
 
 #define DIV_F(_id, cname, pname, o, s, w, f, df)               \
-       __DIV(_id, NULL, cname, pname, o, s, w, f, df, NULL, NULL)
+       __DIV(_id, cname, pname, o, s, w, f, df, NULL)
 
 #define DIV_T(_id, cname, pname, o, s, w, t)                   \
-       __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, NULL, t)
+       __DIV(_id, cname, pname, o, s, w, 0, 0, t)
 
 /**
  * struct samsung_gate_clock: information about gate clock
  * @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
  * @name: name of this gate clock.
  * @parent_name: name of the parent clock.
  * @flags: optional flags for basic clock.
  * @offset: offset of the register for configuring the gate.
  * @bit_idx: bit index of the gate control bit-field in @reg.
  * @gate_flags: flags for gate-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
  */
 struct samsung_gate_clock {
        unsigned int            id;
-       const char              *dev_name;
        const char              *name;
        const char              *parent_name;
        unsigned long           flags;
        unsigned long           offset;
        u8                      bit_idx;
        u8                      gate_flags;
-       const char              *alias;
 };
 
-#define __GATE(_id, dname, cname, pname, o, b, f, gf, a)       \
+#define __GATE(_id, cname, pname, o, b, f, gf)                 \
        {                                                       \
                .id             = _id,                          \
-               .dev_name       = dname,                        \
                .name           = cname,                        \
                .parent_name    = pname,                        \
                .flags          = f,                            \
                .offset         = o,                            \
                .bit_idx        = b,                            \
                .gate_flags     = gf,                           \
-               .alias          = a,                            \
        }
 
 #define GATE(_id, cname, pname, o, b, f, gf)                   \
-       __GATE(_id, NULL, cname, pname, o, b, f, gf, NULL)
-
-#define GATE_A(_id, cname, pname, o, b, f, gf, a)              \
-       __GATE(_id, NULL, cname, pname, o, b, f, gf, a)
-
-#define GATE_D(_id, dname, cname, pname, o, b, f, gf)          \
-       __GATE(_id, dname, cname, pname, o, b, f, gf, NULL)
-
-#define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a)      \
-       __GATE(_id, dname, cname, pname, o, b, f, gf, a)
+       __GATE(_id, cname, pname, o, b, f, gf)
 
 #define PNAME(x) static const char * const x[] __initconst
 
@@ -275,18 +240,15 @@ struct samsung_clk_reg_dump {
 /**
  * struct samsung_pll_clock: information about pll clock
  * @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
  * @name: name of this pll clock.
  * @parent_name: name of the parent clock.
  * @flags: optional flags for basic clock.
  * @con_offset: offset of the register for configuring the PLL.
  * @lock_offset: offset of the register for locking the PLL.
  * @type: Type of PLL to be registered.
- * @alias: optional clock alias name to be assigned to this clock.
  */
 struct samsung_pll_clock {
        unsigned int            id;
-       const char              *dev_name;
        const char              *name;
        const char              *parent_name;
        unsigned long           flags;
@@ -294,31 +256,23 @@ struct samsung_pll_clock {
        int                     lock_offset;
        enum samsung_pll_type   type;
        const struct samsung_pll_rate_table *rate_table;
-       const char              *alias;
 };
 
-#define __PLL(_typ, _id, _dname, _name, _pname, _flags, _lock, _con,   \
-               _rtable, _alias)                                        \
+#define __PLL(_typ, _id, _name, _pname, _flags, _lock, _con, _rtable)  \
        {                                                               \
                .id             = _id,                                  \
                .type           = _typ,                                 \
-               .dev_name       = _dname,                               \
                .name           = _name,                                \
                .parent_name    = _pname,                               \
-               .flags          = CLK_GET_RATE_NOCACHE,                 \
+               .flags          = _flags,                               \
                .con_offset     = _con,                                 \
                .lock_offset    = _lock,                                \
                .rate_table     = _rtable,                              \
-               .alias          = _alias,                               \
        }
 
 #define PLL(_typ, _id, _name, _pname, _lock, _con, _rtable)    \
-       __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE,     \
-               _lock, _con, _rtable, _name)
-
-#define PLL_A(_typ, _id, _name, _pname, _lock, _con, _alias, _rtable) \
-       __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE,     \
-               _lock, _con, _rtable, _alias)
+       __PLL(_typ, _id, _name, _pname, CLK_GET_RATE_NOCACHE, _lock,    \
+             _con, _rtable)
 
 struct samsung_clock_reg_cache {
        struct list_head node;
@@ -352,6 +306,12 @@ struct samsung_cmu_info {
        /* list and number of clocks registers */
        const unsigned long *clk_regs;
        unsigned int nr_clk_regs;
+
+       /* list and number of clocks registers to set before suspend */
+       const struct samsung_clk_reg_dump *suspend_regs;
+       unsigned int nr_suspend_regs;
+       /* name of the parent clock needed for CMU register access */
+       const char *clk_name;
 };
 
 extern struct samsung_clk_provider *__init samsung_clk_init(
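The two new fields are filled in per CMU by the SoC-specific drivers: suspend_regs lists register values to force into the CMU just before entering suspend, and clk_name names the bus clock that has to be running for the CMU's registers to be accessible. A hypothetical initializer (names invented here for illustration, not taken from this patch) would look roughly like:

static const struct samsung_clk_reg_dump example_suspend_regs[] = {
	/* { offset, value } pairs written into the CMU before suspend */
	{ .offset = 0x0200, .value = 0xffffffff },
};

static const struct samsung_cmu_info example_cmu_info = {
	/* ... pll/mux/div/gate tables and clk_regs as before ... */
	.suspend_regs	 = example_suspend_regs,
	.nr_suspend_regs = ARRAY_SIZE(example_suspend_regs),
	.clk_name	 = "aclk_example",	/* parent clock gating register access */
};
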
index 665fa681b2e1e9a2ccb2f2f857d76e98c3e3da64..0cd11e6893afa282c0d9c32bb386fcae5e21e065 100644 (file)
@@ -42,7 +42,7 @@ static struct clk_dmn clk_mmc45 = {
        },
 };
 
-static struct clk_init_data clk_nand_init = {
+static const struct clk_init_data clk_nand_init = {
        .name = "nand",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
index d0c6c9a2d06ae7d87cc5fe6b1d5866e33508e314..be012b4bab46263f92f0e547f673f43945060f6e 100644 (file)
@@ -392,7 +392,7 @@ static const char * const pll_clk_parents[] = {
        "xin",
 };
 
-static struct clk_init_data clk_cpupll_init = {
+static const struct clk_init_data clk_cpupll_init = {
        .name = "cpupll_vco",
        .ops = &ab_pll_ops,
        .parent_names = pll_clk_parents,
@@ -406,7 +406,7 @@ static struct clk_pll clk_cpupll = {
        },
 };
 
-static struct clk_init_data clk_mempll_init = {
+static const struct clk_init_data clk_mempll_init = {
        .name = "mempll_vco",
        .ops = &ab_pll_ops,
        .parent_names = pll_clk_parents,
@@ -420,7 +420,7 @@ static struct clk_pll clk_mempll = {
        },
 };
 
-static struct clk_init_data clk_sys0pll_init = {
+static const struct clk_init_data clk_sys0pll_init = {
        .name = "sys0pll_vco",
        .ops = &ab_pll_ops,
        .parent_names = pll_clk_parents,
@@ -434,7 +434,7 @@ static struct clk_pll clk_sys0pll = {
        },
 };
 
-static struct clk_init_data clk_sys1pll_init = {
+static const struct clk_init_data clk_sys1pll_init = {
        .name = "sys1pll_vco",
        .ops = &ab_pll_ops,
        .parent_names = pll_clk_parents,
@@ -448,7 +448,7 @@ static struct clk_pll clk_sys1pll = {
        },
 };
 
-static struct clk_init_data clk_sys2pll_init = {
+static const struct clk_init_data clk_sys2pll_init = {
        .name = "sys2pll_vco",
        .ops = &ab_pll_ops,
        .parent_names = pll_clk_parents,
@@ -462,7 +462,7 @@ static struct clk_pll clk_sys2pll = {
        },
 };
 
-static struct clk_init_data clk_sys3pll_init = {
+static const struct clk_init_data clk_sys3pll_init = {
        .name = "sys3pll_vco",
        .ops = &ab_pll_ops,
        .parent_names = pll_clk_parents,
@@ -596,7 +596,7 @@ static const char * const audiodto_clk_parents[] = {
        "sys3pll_clk1",
 };
 
-static struct clk_init_data clk_audiodto_init = {
+static const struct clk_init_data clk_audiodto_init = {
        .name = "audio_dto",
        .ops = &dto_ops,
        .parent_names = audiodto_clk_parents,
@@ -617,7 +617,7 @@ static const char * const disp0dto_clk_parents[] = {
        "sys3pll_clk1",
 };
 
-static struct clk_init_data clk_disp0dto_init = {
+static const struct clk_init_data clk_disp0dto_init = {
        .name = "disp0_dto",
        .ops = &dto_ops,
        .parent_names = disp0dto_clk_parents,
@@ -638,7 +638,7 @@ static const char * const disp1dto_clk_parents[] = {
        "sys3pll_clk1",
 };
 
-static struct clk_init_data clk_disp1dto_init = {
+static const struct clk_init_data clk_disp1dto_init = {
        .name = "disp1_dto",
        .ops = &dto_ops,
        .parent_names = disp1dto_clk_parents,
index 77e1e2491689b9c37e943c43e8fabf5778a561aa..d8f9efa5129adf4d7e9731ba1dd7967f71d9c946 100644 (file)
@@ -184,7 +184,7 @@ static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
        return clk_hw_get_rate(parent_clk);
 }
 
-static struct clk_ops std_pll_ops = {
+static const struct clk_ops std_pll_ops = {
        .recalc_rate = pll_clk_recalc_rate,
        .round_rate = pll_clk_round_rate,
        .set_rate = pll_clk_set_rate,
@@ -194,21 +194,21 @@ static const char * const pll_clk_parents[] = {
        "osc",
 };
 
-static struct clk_init_data clk_pll1_init = {
+static const struct clk_init_data clk_pll1_init = {
        .name = "pll1",
        .ops = &std_pll_ops,
        .parent_names = pll_clk_parents,
        .num_parents = ARRAY_SIZE(pll_clk_parents),
 };
 
-static struct clk_init_data clk_pll2_init = {
+static const struct clk_init_data clk_pll2_init = {
        .name = "pll2",
        .ops = &std_pll_ops,
        .parent_names = pll_clk_parents,
        .num_parents = ARRAY_SIZE(pll_clk_parents),
 };
 
-static struct clk_init_data clk_pll3_init = {
+static const struct clk_init_data clk_pll3_init = {
        .name = "pll3",
        .ops = &std_pll_ops,
        .parent_names = pll_clk_parents,
@@ -265,13 +265,13 @@ static unsigned long usb_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long pa
        return (reg & SIRFSOC_USBPHY_PLL_BYPASS) ? parent_rate : 48*MHZ;
 }
 
-static struct clk_ops usb_pll_ops = {
+static const struct clk_ops usb_pll_ops = {
        .enable = usb_pll_clk_enable,
        .disable = usb_pll_clk_disable,
        .recalc_rate = usb_pll_clk_recalc_rate,
 };
 
-static struct clk_init_data clk_usb_pll_init = {
+static const struct clk_init_data clk_usb_pll_init = {
        .name = "usb_pll",
        .ops = &usb_pll_ops,
        .parent_names = pll_clk_parents,
@@ -437,7 +437,7 @@ static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
        return ret2 ? ret2 : ret1;
 }
 
-static struct clk_ops msi_ops = {
+static const struct clk_ops msi_ops = {
        .set_rate = dmn_clk_set_rate,
        .round_rate = dmn_clk_round_rate,
        .recalc_rate = dmn_clk_recalc_rate,
@@ -445,7 +445,7 @@ static struct clk_ops msi_ops = {
        .get_parent = dmn_clk_get_parent,
 };
 
-static struct clk_init_data clk_mem_init = {
+static const struct clk_init_data clk_mem_init = {
        .name = "mem",
        .ops = &msi_ops,
        .parent_names = dmn_clk_parents,
@@ -459,7 +459,7 @@ static struct clk_dmn clk_mem = {
        },
 };
 
-static struct clk_init_data clk_sys_init = {
+static const struct clk_init_data clk_sys_init = {
        .name = "sys",
        .ops = &msi_ops,
        .parent_names = dmn_clk_parents,
@@ -474,7 +474,7 @@ static struct clk_dmn clk_sys = {
        },
 };
 
-static struct clk_init_data clk_io_init = {
+static const struct clk_init_data clk_io_init = {
        .name = "io",
        .ops = &msi_ops,
        .parent_names = dmn_clk_parents,
@@ -488,7 +488,7 @@ static struct clk_dmn clk_io = {
        },
 };
 
-static struct clk_ops cpu_ops = {
+static const struct clk_ops cpu_ops = {
        .set_parent = dmn_clk_set_parent,
        .get_parent = dmn_clk_get_parent,
        .set_rate = cpu_clk_set_rate,
@@ -496,7 +496,7 @@ static struct clk_ops cpu_ops = {
        .recalc_rate = cpu_clk_recalc_rate,
 };
 
-static struct clk_init_data clk_cpu_init = {
+static const struct clk_init_data clk_cpu_init = {
        .name = "cpu",
        .ops = &cpu_ops,
        .parent_names = dmn_clk_parents,
@@ -511,7 +511,7 @@ static struct clk_dmn clk_cpu = {
        },
 };
 
-static struct clk_ops dmn_ops = {
+static const struct clk_ops dmn_ops = {
        .is_enabled = std_clk_is_enabled,
        .enable = std_clk_enable,
        .disable = std_clk_disable,
@@ -524,7 +524,7 @@ static struct clk_ops dmn_ops = {
 
 /* dsp, gfx, mm, lcd and vpp domain */
 
-static struct clk_init_data clk_dsp_init = {
+static const struct clk_init_data clk_dsp_init = {
        .name = "dsp",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
@@ -539,7 +539,7 @@ static struct clk_dmn clk_dsp = {
        },
 };
 
-static struct clk_init_data clk_gfx_init = {
+static const struct clk_init_data clk_gfx_init = {
        .name = "gfx",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
@@ -554,7 +554,7 @@ static struct clk_dmn clk_gfx = {
        },
 };
 
-static struct clk_init_data clk_mm_init = {
+static const struct clk_init_data clk_mm_init = {
        .name = "mm",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
@@ -574,7 +574,7 @@ static struct clk_dmn clk_mm = {
  */
 #define clk_gfx2d clk_mm
 
-static struct clk_init_data clk_lcd_init = {
+static const struct clk_init_data clk_lcd_init = {
        .name = "lcd",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
@@ -589,7 +589,7 @@ static struct clk_dmn clk_lcd = {
        },
 };
 
-static struct clk_init_data clk_vpp_init = {
+static const struct clk_init_data clk_vpp_init = {
        .name = "vpp",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
@@ -604,21 +604,21 @@ static struct clk_dmn clk_vpp = {
        },
 };
 
-static struct clk_init_data clk_mmc01_init = {
+static const struct clk_init_data clk_mmc01_init = {
        .name = "mmc01",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
        .num_parents = ARRAY_SIZE(dmn_clk_parents),
 };
 
-static struct clk_init_data clk_mmc23_init = {
+static const struct clk_init_data clk_mmc23_init = {
        .name = "mmc23",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
        .num_parents = ARRAY_SIZE(dmn_clk_parents),
 };
 
-static struct clk_init_data clk_mmc45_init = {
+static const struct clk_init_data clk_mmc45_init = {
        .name = "mmc45",
        .ops = &dmn_ops,
        .parent_names = dmn_clk_parents,
@@ -679,13 +679,13 @@ static const char * const std_clk_io_parents[] = {
        "io",
 };
 
-static struct clk_ops ios_ops = {
+static const struct clk_ops ios_ops = {
        .is_enabled = std_clk_is_enabled,
        .enable = std_clk_enable,
        .disable = std_clk_disable,
 };
 
-static struct clk_init_data clk_cphif_init = {
+static const struct clk_init_data clk_cphif_init = {
        .name = "cphif",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -699,7 +699,7 @@ static struct clk_std clk_cphif = {
        },
 };
 
-static struct clk_init_data clk_dmac0_init = {
+static const struct clk_init_data clk_dmac0_init = {
        .name = "dmac0",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -713,7 +713,7 @@ static struct clk_std clk_dmac0 = {
        },
 };
 
-static struct clk_init_data clk_dmac1_init = {
+static const struct clk_init_data clk_dmac1_init = {
        .name = "dmac1",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -727,7 +727,7 @@ static struct clk_std clk_dmac1 = {
        },
 };
 
-static struct clk_init_data clk_audio_init = {
+static const struct clk_init_data clk_audio_init = {
        .name = "audio",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -741,7 +741,7 @@ static struct clk_std clk_audio = {
        },
 };
 
-static struct clk_init_data clk_uart0_init = {
+static const struct clk_init_data clk_uart0_init = {
        .name = "uart0",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -755,7 +755,7 @@ static struct clk_std clk_uart0 = {
        },
 };
 
-static struct clk_init_data clk_uart1_init = {
+static const struct clk_init_data clk_uart1_init = {
        .name = "uart1",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -769,7 +769,7 @@ static struct clk_std clk_uart1 = {
        },
 };
 
-static struct clk_init_data clk_uart2_init = {
+static const struct clk_init_data clk_uart2_init = {
        .name = "uart2",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -783,7 +783,7 @@ static struct clk_std clk_uart2 = {
        },
 };
 
-static struct clk_init_data clk_usp0_init = {
+static const struct clk_init_data clk_usp0_init = {
        .name = "usp0",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -797,7 +797,7 @@ static struct clk_std clk_usp0 = {
        },
 };
 
-static struct clk_init_data clk_usp1_init = {
+static const struct clk_init_data clk_usp1_init = {
        .name = "usp1",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -811,7 +811,7 @@ static struct clk_std clk_usp1 = {
        },
 };
 
-static struct clk_init_data clk_usp2_init = {
+static const struct clk_init_data clk_usp2_init = {
        .name = "usp2",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -825,7 +825,7 @@ static struct clk_std clk_usp2 = {
        },
 };
 
-static struct clk_init_data clk_vip_init = {
+static const struct clk_init_data clk_vip_init = {
        .name = "vip",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -839,7 +839,7 @@ static struct clk_std clk_vip = {
        },
 };
 
-static struct clk_init_data clk_spi0_init = {
+static const struct clk_init_data clk_spi0_init = {
        .name = "spi0",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -853,7 +853,7 @@ static struct clk_std clk_spi0 = {
        },
 };
 
-static struct clk_init_data clk_spi1_init = {
+static const struct clk_init_data clk_spi1_init = {
        .name = "spi1",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -867,7 +867,7 @@ static struct clk_std clk_spi1 = {
        },
 };
 
-static struct clk_init_data clk_tsc_init = {
+static const struct clk_init_data clk_tsc_init = {
        .name = "tsc",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -881,7 +881,7 @@ static struct clk_std clk_tsc = {
        },
 };
 
-static struct clk_init_data clk_i2c0_init = {
+static const struct clk_init_data clk_i2c0_init = {
        .name = "i2c0",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -895,7 +895,7 @@ static struct clk_std clk_i2c0 = {
        },
 };
 
-static struct clk_init_data clk_i2c1_init = {
+static const struct clk_init_data clk_i2c1_init = {
        .name = "i2c1",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -909,7 +909,7 @@ static struct clk_std clk_i2c1 = {
        },
 };
 
-static struct clk_init_data clk_pwmc_init = {
+static const struct clk_init_data clk_pwmc_init = {
        .name = "pwmc",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -923,7 +923,7 @@ static struct clk_std clk_pwmc = {
        },
 };
 
-static struct clk_init_data clk_efuse_init = {
+static const struct clk_init_data clk_efuse_init = {
        .name = "efuse",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -937,7 +937,7 @@ static struct clk_std clk_efuse = {
        },
 };
 
-static struct clk_init_data clk_pulse_init = {
+static const struct clk_init_data clk_pulse_init = {
        .name = "pulse",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -955,7 +955,7 @@ static const char * const std_clk_dsp_parents[] = {
        "dsp",
 };
 
-static struct clk_init_data clk_gps_init = {
+static const struct clk_init_data clk_gps_init = {
        .name = "gps",
        .ops = &ios_ops,
        .parent_names = std_clk_dsp_parents,
@@ -969,7 +969,7 @@ static struct clk_std clk_gps = {
        },
 };
 
-static struct clk_init_data clk_mf_init = {
+static const struct clk_init_data clk_mf_init = {
        .name = "mf",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
@@ -987,7 +987,7 @@ static const char * const std_clk_sys_parents[] = {
        "sys",
 };
 
-static struct clk_init_data clk_security_init = {
+static const struct clk_init_data clk_security_init = {
        .name = "security",
        .ops = &ios_ops,
        .parent_names = std_clk_sys_parents,
@@ -1005,7 +1005,7 @@ static const char * const std_clk_usb_parents[] = {
        "usb_pll",
 };
 
-static struct clk_init_data clk_usb0_init = {
+static const struct clk_init_data clk_usb0_init = {
        .name = "usb0",
        .ops = &ios_ops,
        .parent_names = std_clk_usb_parents,
@@ -1019,7 +1019,7 @@ static struct clk_std clk_usb0 = {
        },
 };
 
-static struct clk_init_data clk_usb1_init = {
+static const struct clk_init_data clk_usb1_init = {
        .name = "usb1",
        .ops = &ios_ops,
        .parent_names = std_clk_usb_parents,
index aac1c8ec151a9aff5a962e447aeb46bf680f269b..2f824320c3180c869d5584edace0ab029d36bba5 100644 (file)
@@ -42,7 +42,7 @@ static struct clk_dmn clk_mmc45 = {
        },
 };
 
-static struct clk_init_data clk_nand_init = {
+static const struct clk_init_data clk_nand_init = {
        .name = "nand",
        .ops = &ios_ops,
        .parent_names = std_clk_io_parents,
index f271c350ef9404838fcaa6502db28cd1db5a744c..906410413bc149d7e33e361fbb9874ed473d1bdc 100644 (file)
@@ -29,7 +29,7 @@
 
 #define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
 
-static struct aux_clk_masks default_aux_masks = {
+static const struct aux_clk_masks default_aux_masks = {
        .eq_sel_mask = AUX_EQ_SEL_MASK,
        .eq_sel_shift = AUX_EQ_SEL_SHIFT,
        .eq1_mask = AUX_EQ1_SEL,
@@ -128,7 +128,7 @@ static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
        return 0;
 }
 
-static struct clk_ops clk_aux_ops = {
+static const struct clk_ops clk_aux_ops = {
        .recalc_rate = clk_aux_recalc_rate,
        .round_rate = clk_aux_round_rate,
        .set_rate = clk_aux_set_rate,
@@ -136,7 +136,7 @@ static struct clk_ops clk_aux_ops = {
 
 struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
                const char *parent_name, unsigned long flags, void __iomem *reg,
-               struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+               const struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
                u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
 {
        struct clk_aux *aux;
@@ -149,10 +149,8 @@ struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
        }
 
        aux = kzalloc(sizeof(*aux), GFP_KERNEL);
-       if (!aux) {
-               pr_err("could not allocate aux clk\n");
+       if (!aux)
                return ERR_PTR(-ENOMEM);
-       }
 
        /* struct clk_aux assignments */
        if (!masks)
index 58d678b5b40a76c6e426b41ed957e044197b2ae8..229c96daece62d311f03cc86b91f75ffa391b6ca 100644 (file)
@@ -116,7 +116,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
        return 0;
 }
 
-static struct clk_ops clk_frac_ops = {
+static const struct clk_ops clk_frac_ops = {
        .recalc_rate = clk_frac_recalc_rate,
        .round_rate = clk_frac_round_rate,
        .set_rate = clk_frac_set_rate,
@@ -136,10 +136,8 @@ struct clk *clk_register_frac(const char *name, const char *parent_name,
        }
 
        frac = kzalloc(sizeof(*frac), GFP_KERNEL);
-       if (!frac) {
-               pr_err("could not allocate frac clk\n");
+       if (!frac)
                return ERR_PTR(-ENOMEM);
-       }
 
        /* struct clk_frac assignments */
        frac->reg = reg;
index 1a722e99e76e949352ab03120440ec3e312c365d..28262f4225628742db10ec13de492b87cc0171bf 100644 (file)
@@ -105,7 +105,7 @@ static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
        return 0;
 }
 
-static struct clk_ops clk_gpt_ops = {
+static const struct clk_ops clk_gpt_ops = {
        .recalc_rate = clk_gpt_recalc_rate,
        .round_rate = clk_gpt_round_rate,
        .set_rate = clk_gpt_set_rate,
@@ -125,10 +125,8 @@ struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
        }
 
        gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
-       if (!gpt) {
-               pr_err("could not allocate gpt clk\n");
+       if (!gpt)
                return ERR_PTR(-ENOMEM);
-       }
 
        /* struct clk_gpt assignments */
        gpt->reg = reg;
index dc21ca4601aab28ce2eba866e2d00e0e33f9dbe1..c08dec30bfa629dd5599822f74f0054be114b0e4 100644 (file)
@@ -165,7 +165,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
        return 0;
 }
 
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops clk_pll_ops = {
        .recalc_rate = clk_pll_recalc_rate,
        .round_rate = clk_pll_round_rate,
        .set_rate = clk_pll_set_rate,
@@ -266,7 +266,7 @@ static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
        return 0;
 }
 
-static struct clk_ops clk_vco_ops = {
+static const struct clk_ops clk_vco_ops = {
        .recalc_rate = clk_vco_recalc_rate,
        .round_rate = clk_vco_round_rate,
        .set_rate = clk_vco_set_rate,
@@ -292,16 +292,12 @@ struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
        }
 
        vco = kzalloc(sizeof(*vco), GFP_KERNEL);
-       if (!vco) {
-               pr_err("could not allocate vco clk\n");
+       if (!vco)
                return ERR_PTR(-ENOMEM);
-       }
 
        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-       if (!pll) {
-               pr_err("could not allocate pll clk\n");
+       if (!pll)
                goto free_vco;
-       }
 
        /* struct clk_vco assignments */
        vco->mode_reg = mode_reg;
index 9834944f08b1dafff2a24835a5b1e57b010e3e4a..af0e25f496c1a112ac3dc89d9f3b8e14aea3fd32 100644 (file)
@@ -49,7 +49,7 @@ struct aux_rate_tbl {
 struct clk_aux {
        struct                  clk_hw hw;
        void __iomem            *reg;
-       struct aux_clk_masks    *masks;
+       const struct aux_clk_masks *masks;
        struct aux_rate_tbl     *rtbl;
        u8                      rtbl_cnt;
        spinlock_t              *lock;
@@ -112,7 +112,7 @@ typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
 /* clk register routines */
 struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
                const char *parent_name, unsigned long flags, void __iomem *reg,
-               struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+               const struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
                u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
 struct clk *clk_register_frac(const char *name, const char *parent_name,
                unsigned long flags, void __iomem *reg,
index 2f86e3f94efa6f2fe306fbbecd581bd70e4b392e..591248c9a88e797295a712abcb9b26b6d00a95e9 100644 (file)
@@ -284,7 +284,7 @@ static struct frac_rate_tbl clcd_rtbl[] = {
 };
 
 /* i2s prescaler1 masks */
-static struct aux_clk_masks i2s_prs1_masks = {
+static const struct aux_clk_masks i2s_prs1_masks = {
        .eq_sel_mask = AUX_EQ_SEL_MASK,
        .eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
        .eq1_mask = AUX_EQ1_SEL,
index cbb19a90f2d614569f48ad7ffec1c287714e5810..e5bc8c828cf0d84a420fd087ae726d961f5f05c2 100644 (file)
@@ -323,7 +323,7 @@ static struct frac_rate_tbl clcd_rtbl[] = {
 };
 
 /* i2s prescaler1 masks */
-static struct aux_clk_masks i2s_prs1_masks = {
+static const struct aux_clk_masks i2s_prs1_masks = {
        .eq_sel_mask = AUX_EQ_SEL_MASK,
        .eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
        .eq1_mask = AUX_EQ1_SEL,
index d1c2fa93ddd9bcf782829245545c35db05cfd06d..4141c3fe08ae1eaccb3ca77d4d1989649c3ac718 100644 (file)
@@ -11,6 +11,7 @@ lib-$(CONFIG_SUNXI_CCU)               += ccu_gate.o
 lib-$(CONFIG_SUNXI_CCU)                += ccu_mux.o
 lib-$(CONFIG_SUNXI_CCU)                += ccu_mult.o
 lib-$(CONFIG_SUNXI_CCU)                += ccu_phase.o
+lib-$(CONFIG_SUNXI_CCU)                += ccu_sdm.o
 
 # Multi-factor clocks
 lib-$(CONFIG_SUNXI_CCU)                += ccu_nk.o
index 286b0049b7b604e461c8c2d1c85b13addf161686..ffa5dac221e471f95cf4c16dd4c6cb759eb6872a 100644 (file)
@@ -28,6 +28,7 @@
 #include "ccu_nkmp.h"
 #include "ccu_nm.h"
 #include "ccu_phase.h"
+#include "ccu_sdm.h"
 
 #include "ccu-sun4i-a10.h"
 
@@ -51,16 +52,29 @@ static struct ccu_nkmp pll_core_clk = {
  * the base (2x, 4x and 8x), and one variable divider (the one true
  * pll audio).
  *
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names.
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
  */
 #define SUN4I_PLL_AUDIO_REG    0x008
+
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+       { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+       { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
 static struct ccu_nm pll_audio_base_clk = {
        .enable         = BIT(31),
        .n              = _SUNXI_CCU_MULT_OFFSET(8, 7, 0),
        .m              = _SUNXI_CCU_DIV_OFFSET(0, 5, 0),
+       .sdm            = _SUNXI_CCU_SDM(pll_audio_sdm_table, 0,
+                                        0x00c, BIT(31)),
        .common         = {
                .reg            = 0x008,
+               .features       = CCU_FEATURE_SIGMA_DELTA_MOD,
                .hw.init        = CLK_HW_INIT("pll-audio-base",
                                              "hosc",
                                              &ccu_nm_ops,
@@ -223,7 +237,7 @@ static struct ccu_mux cpu_clk = {
                .hw.init        = CLK_HW_INIT_PARENTS("cpu",
                                                      cpu_parents,
                                                      &ccu_mux_ops,
-                                                     CLK_IS_CRITICAL),
+                                                     CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
        }
 };
 
@@ -1021,9 +1035,9 @@ static struct ccu_common *sun4i_sun7i_ccu_clks[] = {
        &out_b_clk.common
 };
 
-/* Post-divider for pll-audio is hardcoded to 4 */
+/* Post-divider for pll-audio is hardcoded to 1 */
 static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
-                       "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+                       "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
                        "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1420,10 +1434,10 @@ static void __init sun4i_ccu_init(struct device_node *node,
                return;
        }
 
-       /* Force the PLL-Audio-1x divider to 4 */
+       /* Force the PLL-Audio-1x divider to 1 */
        val = readl(reg + SUN4I_PLL_AUDIO_REG);
        val &= ~GENMASK(29, 26);
-       writel(val | (4 << 26), reg + SUN4I_PLL_AUDIO_REG);
+       writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG);
 
        /*
         * Use the peripheral PLL6 as the AHB parent, instead of CPU /
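The two entries in pll_audio_sdm_table are not arbitrary: 22579200 Hz and 24576000 Hz are the 512x multiples of the 44.1 kHz and 48 kHz sample-rate families, i.e. the common 512*fs audio master clock rates the codec ends up requesting. A trivial user-space check of that arithmetic (illustrative only, not kernel code):

#include <assert.h>

int main(void)
{
	/* 44.1 kHz family: 44100 * 512 = 22579200 Hz */
	assert(44100UL * 512 == 22579200UL);
	/* 48 kHz family:   48000 * 512 = 24576000 Hz */
	assert(48000UL * 512 == 24576000UL);
	return 0;
}
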
index c5947c7c050e79bd7123769b6aa75840aaabbaf2..23c908ad509fd80029bc8fe722e4f808a810c77d 100644 (file)
@@ -29,7 +29,7 @@
 #define CLK_PLL_AUDIO_4X       6
 #define CLK_PLL_AUDIO_8X       7
 #define CLK_PLL_VIDEO0         8
-#define CLK_PLL_VIDEO0_2X      9
+/* The PLL_VIDEO0_2X clock is exported */
 #define CLK_PLL_VE             10
 #define CLK_PLL_DDR_BASE       11
 #define CLK_PLL_DDR            12
@@ -38,7 +38,7 @@
 #define CLK_PLL_PERIPH         15
 #define CLK_PLL_PERIPH_SATA    16
 #define CLK_PLL_VIDEO1         17
-#define CLK_PLL_VIDEO1_2X      18
+/* The PLL_VIDEO1_2X clock is exported */
 #define CLK_PLL_GPU            19
 
 /* The CPU clock is exported */
index ab9e850b370783259cde6b5aa7fa0b9cb05c6daa..fa2c2dd771021b05ff7782bd514d108b23078d1d 100644 (file)
@@ -26,6 +26,7 @@
 #include "ccu_nkmp.h"
 #include "ccu_nm.h"
 #include "ccu_phase.h"
+#include "ccu_sdm.h"
 
 #include "ccu-sun5i.h"
 
@@ -49,11 +50,20 @@ static struct ccu_nkmp pll_core_clk = {
  * the base (2x, 4x and 8x), and one variable divider (the one true
  * pll audio).
  *
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
  */
 #define SUN5I_PLL_AUDIO_REG    0x008
 
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+       { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+       { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
 static struct ccu_nm pll_audio_base_clk = {
        .enable         = BIT(31),
        .n              = _SUNXI_CCU_MULT_OFFSET(8, 7, 0),
@@ -63,8 +73,11 @@ static struct ccu_nm pll_audio_base_clk = {
         * offset
         */
        .m              = _SUNXI_CCU_DIV_OFFSET(0, 5, 0),
+       .sdm            = _SUNXI_CCU_SDM(pll_audio_sdm_table, 0,
+                                        0x00c, BIT(31)),
        .common         = {
                .reg            = 0x008,
+               .features       = CCU_FEATURE_SIGMA_DELTA_MOD,
                .hw.init        = CLK_HW_INIT("pll-audio-base",
                                              "hosc",
                                              &ccu_nm_ops,
@@ -597,9 +610,9 @@ static struct ccu_common *sun5i_a10s_ccu_clks[] = {
        &iep_clk.common,
 };
 
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
 static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
-                       "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+                       "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
                        "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -980,10 +993,10 @@ static void __init sun5i_ccu_init(struct device_node *node,
                return;
        }
 
-       /* Force the PLL-Audio-1x divider to 4 */
+       /* Force the PLL-Audio-1x divider to 1 */
        val = readl(reg + SUN5I_PLL_AUDIO_REG);
-       val &= ~GENMASK(19, 16);
-       writel(val | (3 << 16), reg + SUN5I_PLL_AUDIO_REG);
+       val &= ~GENMASK(29, 26);
+       writel(val | (0 << 26), reg + SUN5I_PLL_AUDIO_REG);
 
        /*
         * Use the peripheral PLL as the AHB parent, instead of CPU /
index 8af434815fba9f284154c92aa7dc55e26ec96981..72b16ed1012b1e78e272b0ca06fee32a8a49aed0 100644 (file)
@@ -31,6 +31,7 @@
 #include "ccu_nkmp.h"
 #include "ccu_nm.h"
 #include "ccu_phase.h"
+#include "ccu_sdm.h"
 
 #include "ccu-sun6i-a31.h"
 
@@ -48,18 +49,29 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_cpu_clk, "pll-cpu",
  * the base (2x, 4x and 8x), and one variable divider (the one true
  * pll audio).
  *
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
  */
 #define SUN6I_A31_PLL_AUDIO_REG        0x008
 
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
-                                  "osc24M", 0x008,
-                                  8, 7,        /* N */
-                                  0, 5,        /* M */
-                                  BIT(31),     /* gate */
-                                  BIT(28),     /* lock */
-                                  CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+       { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+       { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+                                      "osc24M", 0x008,
+                                      8, 7,    /* N */
+                                      0, 5,    /* M */
+                                      pll_audio_sdm_table, BIT(24),
+                                      0x284, BIT(31),
+                                      BIT(31), /* gate */
+                                      BIT(28), /* lock */
+                                      CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
                                        "osc24M", 0x010,
@@ -608,7 +620,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
                                 0x150, 0, 4, 24, 2, BIT(31),
                                 CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0);
 
 static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
 
@@ -950,9 +962,9 @@ static struct ccu_common *sun6i_a31_ccu_clks[] = {
        &out_c_clk.common,
 };
 
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
 static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
-                       "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+                       "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
                        "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1221,10 +1233,10 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
                return;
        }
 
-       /* Force the PLL-Audio-1x divider to 4 */
+       /* Force the PLL-Audio-1x divider to 1 */
        val = readl(reg + SUN6I_A31_PLL_AUDIO_REG);
        val &= ~GENMASK(19, 16);
-       writel(val | (3 << 16), reg + SUN6I_A31_PLL_AUDIO_REG);
+       writel(val | (0 << 16), reg + SUN6I_A31_PLL_AUDIO_REG);
 
        /* Force PLL-MIPI to MIPI mode */
        val = readl(reg + SUN6I_A31_PLL_MIPI_REG);
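For reference, the two rates in the SDM table are the standard 512x audio master clocks: 22.5792 MHz for the 44.1 kHz sample-rate family and 24.576 MHz for the 48 kHz family, which is why the fixed post-divider above drops from 4 to 1 and pll-audio-base gets programmed to the codec rates directly. A minimal userspace C sketch of that arithmetic (not part of the patch; the 512x master-clock ratio is an assumption about the codec):

#include <stdio.h>

/* Check the SDM table rates against 512x the common audio sample rates. */
int main(void)
{
	const unsigned long sdm_rates[]    = { 22579200, 24576000 };
	const unsigned long sample_rates[] = { 44100, 48000 };

	for (int i = 0; i < 2; i++)
		printf("%lu Hz = 512 * %lu Hz (%s)\n",
		       sdm_rates[i], sdm_rates[i] / 512,
		       sdm_rates[i] == 512 * sample_rates[i] ? "ok" : "mismatch");
	return 0;
}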
index 4e434011e9e795e390641b38be3a262151ae1ebf..27e6ad4133ab462df78ee1c5d36c41a1d536b753 100644 (file)
@@ -27,7 +27,9 @@
 #define CLK_PLL_AUDIO_4X       4
 #define CLK_PLL_AUDIO_8X       5
 #define CLK_PLL_VIDEO0         6
-#define CLK_PLL_VIDEO0_2X      7
+
+/* The PLL_VIDEO0_2X clock is exported */
+
 #define CLK_PLL_VE             8
 #define CLK_PLL_DDR            9
 
@@ -35,7 +37,9 @@
 
 #define CLK_PLL_PERIPH_2X      11
 #define CLK_PLL_VIDEO1         12
-#define CLK_PLL_VIDEO1_2X      13
+
+/* The PLL_VIDEO1_2X clock is exported */
+
 #define CLK_PLL_GPU            14
 #define CLK_PLL_MIPI           15
 #define CLK_PLL9               16
index d93b452f0df9752aa348323821a5de0e20cd51c3..a4fa2945f2302e8f3a41632b456db28018e0069b 100644 (file)
@@ -26,6 +26,7 @@
 #include "ccu_nkmp.h"
 #include "ccu_nm.h"
 #include "ccu_phase.h"
+#include "ccu_sdm.h"
 
 #include "ccu-sun8i-a23-a33.h"
 
@@ -52,18 +53,29 @@ static struct ccu_nkmp pll_cpux_clk = {
  * the base (2x, 4x and 8x), and one variable divider (the one true
  * pll audio).
  *
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
  */
 #define SUN8I_A23_PLL_AUDIO_REG        0x008
 
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
-                                  "osc24M", 0x008,
-                                  8, 7,                /* N */
-                                  0, 5,                /* M */
-                                  BIT(31),             /* gate */
-                                  BIT(28),             /* lock */
-                                  CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+       { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+       { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+                                      "osc24M", 0x008,
+                                      8, 7,    /* N */
+                                      0, 5,    /* M */
+                                      pll_audio_sdm_table, BIT(24),
+                                      0x284, BIT(31),
+                                      BIT(31), /* gate */
+                                      BIT(28), /* lock */
+                                      CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
                                        "osc24M", 0x010,
@@ -538,9 +550,9 @@ static struct ccu_common *sun8i_a23_ccu_clks[] = {
        &ats_clk.common,
 };
 
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
 static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
-                       "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+                       "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
                        "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -720,10 +732,10 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node)
                return;
        }
 
-       /* Force the PLL-Audio-1x divider to 4 */
+       /* Force the PLL-Audio-1x divider to 1 */
        val = readl(reg + SUN8I_A23_PLL_AUDIO_REG);
        val &= ~GENMASK(19, 16);
-       writel(val | (3 << 16), reg + SUN8I_A23_PLL_AUDIO_REG);
+       writel(val | (0 << 16), reg + SUN8I_A23_PLL_AUDIO_REG);
 
        /* Force PLL-MIPI to MIPI mode */
        val = readl(reg + SUN8I_A23_PLL_MIPI_REG);
index e43acebdfbcdbe98d67009e4a98ae0619181018a..5cedcd0d8be8d49874a17500fda495a393dabda5 100644 (file)
@@ -354,9 +354,9 @@ static SUNXI_CCU_GATE(bus_tdm_clk,  "bus-tdm",      "apb1",
 static SUNXI_CCU_GATE(bus_i2c0_clk,    "bus-i2c0",     "apb2",
                      0x06c, BIT(0), 0);
 static SUNXI_CCU_GATE(bus_i2c1_clk,    "bus-i2c1",     "apb2",
-                     0x06c, BIT(0), 0);
+                     0x06c, BIT(1), 0);
 static SUNXI_CCU_GATE(bus_i2c2_clk,    "bus-i2c2",     "apb2",
-                     0x06c, BIT(0), 0);
+                     0x06c, BIT(2), 0);
 static SUNXI_CCU_GATE(bus_uart0_clk,   "bus-uart0",    "apb2",
                      0x06c, BIT(16), 0);
 static SUNXI_CCU_GATE(bus_uart1_clk,   "bus-uart1",    "apb2",
@@ -506,7 +506,7 @@ static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk",
                                       csi_mclk_parents, csi_mclk_table,
                                       0x134,
                                       0, 5,    /* M */
-                                      10, 3,   /* mux */
+                                      8, 3,    /* mux */
                                       BIT(15), /* gate */
                                       0);
 
index 5cdaf52669e443de01829f699bd72231db6bbf05..5cc9d9952121308767c2eea7b1365efcd3a27f1d 100644 (file)
@@ -41,11 +41,16 @@ static SUNXI_CCU_GATE(wb_clk,               "wb",           "wb-div",
 
 static SUNXI_CCU_M(mixer0_div_clk, "mixer0-div", "de", 0x0c, 0, 4,
                   CLK_SET_RATE_PARENT);
-static SUNXI_CCU_M(mixer1_div_clk, "mixer1-div", "de", 0x0c, 4, 4,
-                  CLK_SET_RATE_PARENT);
 static SUNXI_CCU_M(wb_div_clk, "wb-div", "de", 0x0c, 8, 4,
                   CLK_SET_RATE_PARENT);
 
+static SUNXI_CCU_M(mixer0_div_a83_clk, "mixer0-div", "pll-de", 0x0c, 0, 4,
+                  CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(mixer1_div_a83_clk, "mixer1-div", "pll-de", 0x0c, 4, 4,
+                  CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
+                  CLK_SET_RATE_PARENT);
+
 static struct ccu_common *sun8i_a83t_de2_clks[] = {
        &mixer0_clk.common,
        &mixer1_clk.common,
@@ -55,9 +60,9 @@ static struct ccu_common *sun8i_a83t_de2_clks[] = {
        &bus_mixer1_clk.common,
        &bus_wb_clk.common,
 
-       &mixer0_div_clk.common,
-       &mixer1_div_clk.common,
-       &wb_div_clk.common,
+       &mixer0_div_a83_clk.common,
+       &mixer1_div_a83_clk.common,
+       &wb_div_a83_clk.common,
 };
 
 static struct ccu_common *sun8i_v3s_de2_clks[] = {
@@ -81,9 +86,9 @@ static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
                [CLK_BUS_MIXER1]        = &bus_mixer1_clk.common.hw,
                [CLK_BUS_WB]            = &bus_wb_clk.common.hw,
 
-               [CLK_MIXER0_DIV]        = &mixer0_div_clk.common.hw,
-               [CLK_MIXER1_DIV]        = &mixer1_div_clk.common.hw,
-               [CLK_WB_DIV]            = &wb_div_clk.common.hw,
+               [CLK_MIXER0_DIV]        = &mixer0_div_a83_clk.common.hw,
+               [CLK_MIXER1_DIV]        = &mixer1_div_a83_clk.common.hw,
+               [CLK_WB_DIV]            = &wb_div_a83_clk.common.hw,
        },
        .num    = CLK_NUMBER,
 };
index 1729ff6a5aaed90fdd9749b2030c92b4d5616619..29bc0566b776e7444f8a07c9a8660073a1ddadb8 100644 (file)
@@ -26,6 +26,7 @@
 #include "ccu_nkmp.h"
 #include "ccu_nm.h"
 #include "ccu_phase.h"
+#include "ccu_sdm.h"
 
 #include "ccu-sun8i-h3.h"
 
@@ -37,25 +38,36 @@ static SUNXI_CCU_NKMP_WITH_GATE_LOCK(pll_cpux_clk, "pll-cpux",
                                     16, 2,     /* P */
                                     BIT(31),   /* gate */
                                     BIT(28),   /* lock */
-                                    0);
+                                    CLK_SET_RATE_UNGATE);
 
 /*
  * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
  * the base (2x, 4x and 8x), and one variable divider (the one true
  * pll audio).
  *
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
  */
 #define SUN8I_H3_PLL_AUDIO_REG 0x008
 
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
-                                  "osc24M", 0x008,
-                                  8, 7,        /* N */
-                                  0, 5,        /* M */
-                                  BIT(31),     /* gate */
-                                  BIT(28),     /* lock */
-                                  0);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+       { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+       { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+                                      "osc24M", 0x008,
+                                      8, 7,    /* N */
+                                      0, 5,    /* M */
+                                      pll_audio_sdm_table, BIT(24),
+                                      0x284, BIT(31),
+                                      BIT(31), /* gate */
+                                      BIT(28), /* lock */
+                                      CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
                                        "osc24M", 0x0010,
@@ -67,7 +79,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
                                        297000000,      /* frac rate 1 */
                                        BIT(31),        /* gate */
                                        BIT(28),        /* lock */
-                                       0);
+                                       CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
                                        "osc24M", 0x0018,
@@ -79,7 +91,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
                                        297000000,      /* frac rate 1 */
                                        BIT(31),        /* gate */
                                        BIT(28),        /* lock */
-                                       0);
+                                       CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
                                    "osc24M", 0x020,
@@ -88,7 +100,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
                                    0, 2,       /* M */
                                    BIT(31),    /* gate */
                                    BIT(28),    /* lock */
-                                   0);
+                                   CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0",
                                           "osc24M", 0x028,
@@ -97,7 +109,7 @@ static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0",
                                           BIT(31),     /* gate */
                                           BIT(28),     /* lock */
                                           2,           /* post-div */
-                                          0);
+                                          CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
                                        "osc24M", 0x0038,
@@ -109,7 +121,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
                                        297000000,      /* frac rate 1 */
                                        BIT(31),        /* gate */
                                        BIT(28),        /* lock */
-                                       0);
+                                       CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1",
                                           "osc24M", 0x044,
@@ -118,7 +130,7 @@ static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1",
                                           BIT(31),     /* gate */
                                           BIT(28),     /* lock */
                                           2,           /* post-div */
-                                          0);
+                                          CLK_SET_RATE_UNGATE);
 
 static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
                                        "osc24M", 0x0048,
@@ -130,7 +142,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
                                        297000000,      /* frac rate 1 */
                                        BIT(31),        /* gate */
                                        BIT(28),        /* lock */
-                                       0);
+                                       CLK_SET_RATE_UNGATE);
 
 static const char * const cpux_parents[] = { "osc32k", "osc24M",
                                             "pll-cpux" , "pll-cpux" };
@@ -484,7 +496,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
                                 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
 
 static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
-                            0x1a0, 0, 3, BIT(31), 0);
+                            0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
 
 static struct ccu_common *sun8i_h3_ccu_clks[] = {
        &pll_cpux_clk.common,
@@ -707,9 +719,9 @@ static struct ccu_common *sun50i_h5_ccu_clks[] = {
        &gpu_clk.common,
 };
 
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
 static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
-                       "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+                       "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
                        "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
 static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1129,10 +1141,10 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
                return;
        }
 
-       /* Force the PLL-Audio-1x divider to 4 */
+       /* Force the PLL-Audio-1x divider to 1 */
        val = readl(reg + SUN8I_H3_PLL_AUDIO_REG);
        val &= ~GENMASK(19, 16);
-       writel(val | (3 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
+       writel(val | (0 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
 
        sunxi_ccu_probe(node, reg, desc);
 
index cadd1a9f93b624c8633c946a7d35cecee0b6fd8d..5d684ce77c548e24be2a0fa8f005269dc2f273ad 100644 (file)
@@ -24,6 +24,7 @@
 #define CCU_FEATURE_ALL_PREDIV         BIT(4)
 #define CCU_FEATURE_LOCK_REG           BIT(5)
 #define CCU_FEATURE_MMC_TIMING_SWITCH  BIT(6)
+#define CCU_FEATURE_SIGMA_DELTA_MOD    BIT(7)
 
 /* MMC timing mode switch bit */
 #define CCU_MMC_NEW_TIMING_MODE                BIT(30)
index a32158e8f2e35a4fa89d5d21111e11e17a518ee8..7620aa973a6e49f5af0371508eacfadc864f5508 100644 (file)
@@ -90,6 +90,14 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
        if (!m)
                m++;
 
+       if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm)) {
+               unsigned long rate =
+                       ccu_sdm_helper_read_rate(&nm->common, &nm->sdm,
+                                                m, n);
+               if (rate)
+                       return rate;
+       }
+
        return parent_rate * n / m;
 }
 
@@ -99,6 +107,12 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
        struct ccu_nm *nm = hw_to_ccu_nm(hw);
        struct _ccu_nm _nm;
 
+       if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
+               return rate;
+
+       if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate))
+               return rate;
+
        _nm.min_n = nm->n.min ?: 1;
        _nm.max_n = nm->n.max ?: 1 << nm->n.width;
        _nm.min_m = 1;
@@ -140,7 +154,16 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
        _nm.min_m = 1;
        _nm.max_m = nm->m.max ?: 1 << nm->m.width;
 
-       ccu_nm_find_best(parent_rate, rate, &_nm);
+       if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
+               ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);
+
+               /* Sigma delta modulation requires specific N and M factors */
+               ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
+                                          &_nm.m, &_nm.n);
+       } else {
+               ccu_sdm_helper_disable(&nm->common, &nm->sdm);
+               ccu_nm_find_best(parent_rate, rate, &_nm);
+       }
 
        spin_lock_irqsave(nm->common.lock, flags);
 
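The ccu_nm_set_rate() hunk above picks factors in one of two ways: when the requested rate has an entry in the SDM table, the table's fixed N and M are used and modulation is enabled; otherwise modulation is disabled and the usual integer N/M search (ccu_nm_find_best()) runs. A simplified standalone sketch of that selection, using a copy of the audio table rather than the kernel helpers:

#include <stdio.h>

struct sdm_setting { unsigned long rate; unsigned int m, n; };

static const struct sdm_setting sdm_table[] = {
	{ 22579200, 8, 7 },
	{ 24576000, 14, 14 },
};

/* Mirrors the branch in ccu_nm_set_rate(): table hit -> fixed factors
 * with SDM enabled, miss -> SDM disabled, integer N/M search instead. */
static void pick_factors(unsigned long rate)
{
	for (unsigned int i = 0; i < sizeof(sdm_table) / sizeof(sdm_table[0]); i++) {
		if (sdm_table[i].rate == rate) {
			printf("%lu Hz: SDM on, n=%u m=%u\n",
			       rate, sdm_table[i].n, sdm_table[i].m);
			return;
		}
	}
	printf("%lu Hz: no table entry, SDM off, run the integer N/M search\n", rate);
}

int main(void)
{
	pick_factors(24576000);
	pick_factors(297000000);
	return 0;
}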
index e87fd186da7830cf4d79bfb5c8b621154735487c..c623b0c7a23c3f7229445aa0b40051245f95b028 100644 (file)
@@ -20,6 +20,7 @@
 #include "ccu_div.h"
 #include "ccu_frac.h"
 #include "ccu_mult.h"
+#include "ccu_sdm.h"
 
 /*
  * struct ccu_nm - Definition of an N-M clock
@@ -33,10 +34,34 @@ struct ccu_nm {
        struct ccu_mult_internal        n;
        struct ccu_div_internal         m;
        struct ccu_frac_internal        frac;
+       struct ccu_sdm_internal         sdm;
 
        struct ccu_common       common;
 };
 
+#define SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(_struct, _name, _parent, _reg, \
+                                       _nshift, _nwidth,               \
+                                       _mshift, _mwidth,               \
+                                       _sdm_table, _sdm_en,            \
+                                       _sdm_reg, _sdm_reg_en,          \
+                                       _gate, _lock, _flags)           \
+       struct ccu_nm _struct = {                                       \
+               .enable         = _gate,                                \
+               .lock           = _lock,                                \
+               .n              = _SUNXI_CCU_MULT(_nshift, _nwidth),    \
+               .m              = _SUNXI_CCU_DIV(_mshift, _mwidth),     \
+               .sdm            = _SUNXI_CCU_SDM(_sdm_table, _sdm_en,   \
+                                                _sdm_reg, _sdm_reg_en),\
+               .common         = {                                     \
+                       .reg            = _reg,                         \
+                       .features       = CCU_FEATURE_SIGMA_DELTA_MOD,  \
+                       .hw.init        = CLK_HW_INIT(_name,            \
+                                                     _parent,          \
+                                                     &ccu_nm_ops,      \
+                                                     _flags),          \
+               },                                                      \
+       }
+
 #define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(_struct, _name, _parent, _reg,        \
                                         _nshift, _nwidth,              \
                                         _mshift, _mwidth,              \
index 1dc4e98ea8023d592a700b11896d3b0dbcbccd40..b67149143554e45247fd55450a4134367a773fe9 100644 (file)
@@ -60,8 +60,22 @@ static int ccu_reset_reset(struct reset_controller_dev *rcdev,
        return 0;
 }
 
+static int ccu_reset_status(struct reset_controller_dev *rcdev,
+                           unsigned long id)
+{
+       struct ccu_reset *ccu = rcdev_to_ccu_reset(rcdev);
+       const struct ccu_reset_map *map = &ccu->reset_map[id];
+
+       /*
+        * The reset control API expects 0 if reset is not asserted,
+        * which is the opposite of what our hardware uses.
+        */
+       return !(map->bit & readl(ccu->base + map->reg));
+}
+
 const struct reset_control_ops ccu_reset_ops = {
        .assert         = ccu_reset_assert,
        .deassert       = ccu_reset_deassert,
        .reset          = ccu_reset_reset,
+       .status         = ccu_reset_status,
 };
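The new .status callback lets consumers read back the reset state through the standard reset API; as the comment notes, the hardware bit means "deasserted", so the value is inverted before being returned and non-zero means the line is still asserted. A rough consumer-side sketch, assuming a device with a single unnamed reset line (function and variable names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_probe_fragment(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* With the .status op above, this reports > 0 while the line
	 * is still asserted, so only deassert when actually needed. */
	if (reset_control_status(rst) > 0)
		return reset_control_deassert(rst);

	return 0;
}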
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
new file mode 100644 (file)
index 0000000..3b3dc9b
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2017 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "ccu_sdm.h"
+
+bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
+                              struct ccu_sdm_internal *sdm)
+{
+       if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+               return false;
+
+       if (sdm->enable && !(readl(common->base + common->reg) & sdm->enable))
+               return false;
+
+       return !!(readl(common->base + sdm->tuning_reg) & sdm->tuning_enable);
+}
+
+void ccu_sdm_helper_enable(struct ccu_common *common,
+                          struct ccu_sdm_internal *sdm,
+                          unsigned long rate)
+{
+       unsigned long flags;
+       unsigned int i;
+       u32 reg;
+
+       if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+               return;
+
+       /* Set the pattern */
+       for (i = 0; i < sdm->table_size; i++)
+               if (sdm->table[i].rate == rate)
+                       writel(sdm->table[i].pattern,
+                              common->base + sdm->tuning_reg);
+
+       /* Make sure SDM is enabled */
+       spin_lock_irqsave(common->lock, flags);
+       reg = readl(common->base + sdm->tuning_reg);
+       writel(reg | sdm->tuning_enable, common->base + sdm->tuning_reg);
+       spin_unlock_irqrestore(common->lock, flags);
+
+       spin_lock_irqsave(common->lock, flags);
+       reg = readl(common->base + common->reg);
+       writel(reg | sdm->enable, common->base + common->reg);
+       spin_unlock_irqrestore(common->lock, flags);
+}
+
+void ccu_sdm_helper_disable(struct ccu_common *common,
+                           struct ccu_sdm_internal *sdm)
+{
+       unsigned long flags;
+       u32 reg;
+
+       if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+               return;
+
+       spin_lock_irqsave(common->lock, flags);
+       reg = readl(common->base + common->reg);
+       writel(reg & ~sdm->enable, common->base + common->reg);
+       spin_unlock_irqrestore(common->lock, flags);
+
+       spin_lock_irqsave(common->lock, flags);
+       reg = readl(common->base + sdm->tuning_reg);
+       writel(reg & ~sdm->tuning_enable, common->base + sdm->tuning_reg);
+       spin_unlock_irqrestore(common->lock, flags);
+}
+
+/*
+ * Sigma delta modulation provides a way to do fractional-N frequency
+ * synthesis, in essence allowing the PLL to output any frequency
+ * within its operational range. On earlier SoCs such as the A10/A20,
+ * some PLLs support this. On later SoCs, all PLLs support this.
+ *
+ * The datasheets do not explain what the "wave top" and "wave bottom"
+ * parameters mean or do, nor how to calculate the effective output
+ * frequency. The only examples (and real world usage) are for the audio
+ * PLL to generate 24.576 and 22.5792 MHz clock rates used by the audio
+ * peripherals. The author lacks the underlying domain knowledge to
+ * pursue this.
+ *
+ * The goal and function of the following code is to support the two
+ * clock rates used by the audio subsystem, allowing for proper audio
+ * playback and capture without any pitch or speed changes.
+ */
+bool ccu_sdm_helper_has_rate(struct ccu_common *common,
+                            struct ccu_sdm_internal *sdm,
+                            unsigned long rate)
+{
+       unsigned int i;
+
+       if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+               return false;
+
+       for (i = 0; i < sdm->table_size; i++)
+               if (sdm->table[i].rate == rate)
+                       return true;
+
+       return false;
+}
+
+unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
+                                      struct ccu_sdm_internal *sdm,
+                                      u32 m, u32 n)
+{
+       unsigned int i;
+       u32 reg;
+
+       pr_debug("%s: Read sigma-delta modulation setting\n",
+                clk_hw_get_name(&common->hw));
+
+       if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+               return 0;
+
+       pr_debug("%s: clock is sigma-delta modulated\n",
+                clk_hw_get_name(&common->hw));
+
+       reg = readl(common->base + sdm->tuning_reg);
+
+       pr_debug("%s: pattern reg is 0x%x",
+                clk_hw_get_name(&common->hw), reg);
+
+       for (i = 0; i < sdm->table_size; i++)
+               if (sdm->table[i].pattern == reg &&
+                   sdm->table[i].m == m && sdm->table[i].n == n)
+                       return sdm->table[i].rate;
+
+       /* We can't calculate the effective clock rate, so just fail. */
+       return 0;
+}
+
+int ccu_sdm_helper_get_factors(struct ccu_common *common,
+                              struct ccu_sdm_internal *sdm,
+                              unsigned long rate,
+                              unsigned long *m, unsigned long *n)
+{
+       unsigned int i;
+
+       if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+               return -EINVAL;
+
+       for (i = 0; i < sdm->table_size; i++)
+               if (sdm->table[i].rate == rate) {
+                       *m = sdm->table[i].m;
+                       *n = sdm->table[i].n;
+                       return 0;
+               }
+
+       /* nothing found */
+       return -EINVAL;
+}
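The pattern words above are copied verbatim from the vendor kernel (the header below calls their encoding out as unknown), but the effective fractional multiplier each entry implies can be back-computed from rate, M and the parent: roughly N = 7.526 for 22.5792 MHz (table n = 7, m = 8) and N = 14.336 for 24.576 MHz (n = 14, m = 14). A small illustration-only sketch of that back-calculation, assuming the usual 24 MHz HOSC parent:

#include <stdio.h>

int main(void)
{
	const struct { unsigned long rate; unsigned int m, n; } tbl[] = {
		{ 22579200, 8, 7 },
		{ 24576000, 14, 14 },
	};
	const double parent = 24000000.0;

	for (int i = 0; i < 2; i++)
		printf("%lu Hz: integer n=%u/m=%u gives %.0f Hz, effective n = %.4f\n",
		       tbl[i].rate, tbl[i].n, tbl[i].m,
		       parent * tbl[i].n / tbl[i].m,
		       tbl[i].rate * tbl[i].m / parent);
	return 0;
}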
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.h b/drivers/clk/sunxi-ng/ccu_sdm.h
new file mode 100644 (file)
index 0000000..2a9b4a2
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SDM_H
+#define _CCU_SDM_H
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_sdm_setting {
+       unsigned long   rate;
+
+       /*
+        * XXX We don't know what the step and bottom register fields
+        * mean. Just copy the whole register value from the vendor
+        * kernel for now.
+        */
+       u32             pattern;
+
+       /*
+        * M and N factors here should be the values used in
+        * calculation, not the raw values written to registers
+        */
+       u32             m;
+       u32             n;
+};
+
+struct ccu_sdm_internal {
+       struct ccu_sdm_setting  *table;
+       u32             table_size;
+       /* early SoCs don't have the SDM enable bit in the PLL register */
+       u32             enable;
+       /* second enable bit in tuning register */
+       u32             tuning_enable;
+       u16             tuning_reg;
+};
+
+#define _SUNXI_CCU_SDM(_table, _enable,                        \
+                      _reg, _reg_enable)               \
+       {                                               \
+               .table          = _table,               \
+               .table_size     = ARRAY_SIZE(_table),   \
+               .enable         = _enable,              \
+               .tuning_enable  = _reg_enable,          \
+               .tuning_reg     = _reg,                 \
+       }
+
+bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
+                              struct ccu_sdm_internal *sdm);
+void ccu_sdm_helper_enable(struct ccu_common *common,
+                          struct ccu_sdm_internal *sdm,
+                          unsigned long rate);
+void ccu_sdm_helper_disable(struct ccu_common *common,
+                           struct ccu_sdm_internal *sdm);
+
+bool ccu_sdm_helper_has_rate(struct ccu_common *common,
+                            struct ccu_sdm_internal *sdm,
+                            unsigned long rate);
+
+unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
+                                      struct ccu_sdm_internal *sdm,
+                                      u32 m, u32 n);
+
+int ccu_sdm_helper_get_factors(struct ccu_common *common,
+                              struct ccu_sdm_internal *sdm,
+                              unsigned long rate,
+                              unsigned long *m, unsigned long *n);
+
+#endif
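Note the two distinct enable bits modelled here: 'enable' sits in the PLL register itself, while 'tuning_enable' gates the separate pattern ("tuning") register at 'tuning_reg'. ccu_sdm_helper_is_enabled() only checks the PLL-register bit when 'enable' is non-zero, so SoCs whose PLL register lacks an SDM enable bit simply pass 0 there, exactly as the sun5i initialisation near the top of this diff does. A hypothetical instantiation sketch for such a PLL (all names are illustrative):

/* Early-SoC style: no SDM enable bit in the PLL register, only the
 * gate in the pattern register at offset 0x00c. */
static struct ccu_sdm_setting example_sdm_table[] = {
	{ .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
	{ .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
};

static struct ccu_sdm_internal example_sdm =
	_SUNXI_CCU_SDM(example_sdm_table,
		       0,		/* no enable bit in the PLL register */
		       0x00c,		/* pattern/tuning register */
		       BIT(31));	/* enable bit in the pattern register */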
index dfe5e3e32d28f050388f05cb0247f101eb69dc74..856fef65433b4b013b8e0c27a2f919b7690f2325 100644 (file)
@@ -276,13 +276,11 @@ void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
 {
        struct clk_hw *hw = __clk_get_hw(clk);
        struct clk_factors *factors;
-       const char *name;
 
        if (!hw)
                return;
 
        factors = to_clk_factors(hw);
-       name = clk_hw_get_name(hw);
 
        of_clk_del_provider(node);
        /* TODO: The composite clock stuff will leak a bit here. */
index 6041bdba2e971ecb93c3047c14854e676750bf5e..a1a634253d6f2299bfad888b2fa193c98b4ac019 100644 (file)
@@ -124,7 +124,7 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
                return PTR_ERR(data->clk);
        }
 
-       data->reset = devm_reset_control_get(&pdev->dev, NULL);
+       data->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
        if (IS_ERR(data->reset)) {
                dev_err(&pdev->dev, "Could not get reset control\n");
                return PTR_ERR(data->reset);
index 638ace64033b92a54b930c4bdf1be65550241099..a896692b74ec8eb70c75d9539914928672984e0d 100644 (file)
@@ -55,6 +55,7 @@ struct tegra_bpmp_clk_message {
        struct {
                void *data;
                size_t size;
+               int ret;
        } rx;
 };
 
@@ -64,6 +65,7 @@ static int tegra_bpmp_clk_transfer(struct tegra_bpmp *bpmp,
        struct mrq_clk_request request;
        struct tegra_bpmp_message msg;
        void *req = &request;
+       int err;
 
        memset(&request, 0, sizeof(request));
        request.cmd_and_id = (clk->cmd << 24) | clk->id;
@@ -84,7 +86,13 @@ static int tegra_bpmp_clk_transfer(struct tegra_bpmp *bpmp,
        msg.rx.data = clk->rx.data;
        msg.rx.size = clk->rx.size;
 
-       return tegra_bpmp_transfer(bpmp, &msg);
+       err = tegra_bpmp_transfer(bpmp, &msg);
+       if (err < 0)
+               return err;
+       else if (msg.rx.ret < 0)
+               return -EINVAL;
+
+       return 0;
 }
 
 static int tegra_bpmp_clk_prepare(struct clk_hw *hw)
@@ -414,11 +422,8 @@ static int tegra_bpmp_probe_clocks(struct tegra_bpmp *bpmp,
                struct tegra_bpmp_clk_info *info = &clocks[count];
 
                err = tegra_bpmp_clk_get_info(bpmp, id, info);
-               if (err < 0) {
-                       dev_err(bpmp->dev, "failed to query clock %u: %d\n",
-                               id, err);
+               if (err < 0)
                        continue;
-               }
 
                if (info->num_parents >= U8_MAX) {
                        dev_err(bpmp->dev,
index 2c44aeb0b97c7b437fa79b2cdf82f8ee9df577b0..0a7deee74eea5846d5c76788828dc73d8f844c7d 100644 (file)
@@ -1728,10 +1728,10 @@ EXPORT_SYMBOL(tegra_dfll_register);
  * @pdev: DFLL platform_device *
  *
  * Unbind this driver from the DFLL hardware device represented by
- * @pdev. The DFLL must be disabled for this to succeed. Returns 0
- * upon success or -EBUSY if the DFLL is still active.
+ * @pdev. The DFLL must be disabled for this to succeed. Returns the
+ * SoC data pointer upon success or ERR_PTR(-EBUSY) if the DFLL is still active.
  */
-int tegra_dfll_unregister(struct platform_device *pdev)
+struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev)
 {
        struct tegra_dfll *td = platform_get_drvdata(pdev);
 
@@ -1739,7 +1739,7 @@ int tegra_dfll_unregister(struct platform_device *pdev)
        if (td->mode != DFLL_DISABLED) {
                dev_err(&pdev->dev,
                        "must disable DFLL before removing driver\n");
-               return -EBUSY;
+               return ERR_PTR(-EBUSY);
        }
 
        debugfs_remove_recursive(td->debugfs_dir);
@@ -1753,6 +1753,6 @@ int tegra_dfll_unregister(struct platform_device *pdev)
 
        reset_control_assert(td->dvco_rst);
 
-       return 0;
+       return td->soc;
 }
 EXPORT_SYMBOL(tegra_dfll_unregister);
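With the return type change, callers now get the SoC data back from tegra_dfll_unregister() instead of a plain error code, and must test the result with IS_ERR()/PTR_ERR(); the tegra124 fcpu driver further down in this diff is converted the same way. A minimal caller sketch (remove-function name illustrative, header path assumed):

#include <linux/err.h>
#include <linux/platform_device.h>

#include "clk-dfll.h"	/* the DFLL header shown below; path assumed */

static int example_dfll_remove(struct platform_device *pdev)
{
	struct tegra_dfll_soc_data *soc;

	soc = tegra_dfll_unregister(pdev);
	if (IS_ERR(soc))
		return PTR_ERR(soc);

	/* soc is only valid on success; undo whatever probe() did with it */
	return 0;
}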
index ed2ad888268f39bd27d7ead71eb3875137f1d937..83352c8078f27a9644d7d5bb165432d1bcddef37 100644 (file)
@@ -43,7 +43,7 @@ struct tegra_dfll_soc_data {
 
 int tegra_dfll_register(struct platform_device *pdev,
                        struct tegra_dfll_soc_data *soc);
-int tegra_dfll_unregister(struct platform_device *pdev);
+struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev);
 int tegra_dfll_runtime_suspend(struct device *dev);
 int tegra_dfll_runtime_resume(struct device *dev);
 
index 11ee5f9ce99e134a73851d0a5709114d0e0aa1a9..b616e33c525574fa7752362c6f8fe2297923d97b 100644 (file)
@@ -13,6 +13,7 @@ enum clk_id {
        tegra_clk_amx,
        tegra_clk_amx1,
        tegra_clk_apb2ape,
+       tegra_clk_ahbdma,
        tegra_clk_apbdma,
        tegra_clk_apbif,
        tegra_clk_ape,
index cf80831de79d63260eb94b1208b76c216c5a20b8..9475c00b7cf9b7dd645cec8df1a5d5e6b1adbb80 100644 (file)
@@ -203,3 +203,11 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
        return _tegra_clk_register_periph(name, parent_names, num_parents,
                        periph, clk_base, offset, CLK_SET_RATE_PARENT);
 }
+
+struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
+                                          struct tegra_periph_init_data *init)
+{
+       return _tegra_clk_register_periph(init->name, init->p.parent_names,
+                                         init->num_parents, &init->periph,
+                                         clk_base, init->offset, init->flags);
+}
index 848255cc0209911ec5781ceb2d09107244bcf659..c02711927d7912cac57e74194df6781adfa1764c 100644 (file)
 #define CLK_SOURCE_NVDEC 0x698
 #define CLK_SOURCE_NVJPG 0x69c
 #define CLK_SOURCE_APE 0x6c0
-#define CLK_SOURCE_SOR1 0x410
 #define CLK_SOURCE_SDMMC_LEGACY 0x694
 #define CLK_SOURCE_QSPI 0x6c4
 #define CLK_SOURCE_VI_I2C 0x6c8
@@ -278,7 +277,6 @@ static DEFINE_SPINLOCK(PLLP_OUTA_lock);
 static DEFINE_SPINLOCK(PLLP_OUTB_lock);
 static DEFINE_SPINLOCK(PLLP_OUTC_lock);
 static DEFINE_SPINLOCK(sor0_lock);
-static DEFINE_SPINLOCK(sor1_lock);
 
 #define MUX_I2S_SPDIF(_id)                                             \
 static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
@@ -604,18 +602,6 @@ static u32 mux_pllp_plld_plld2_clkm_idx[] = {
        [0] = 0, [1] = 2, [2] = 5, [3] = 6
 };
 
-static const char *mux_sor_safe_sor1_brick_sor1_src[] = {
-       /*
-        * Bit 0 of the mux selects sor1_brick, irrespective of bit 1, so the
-        * sor1_brick parent appears twice in the list below. This is merely
-        * to support clk_get_parent() if firmware happened to set these bits
-        * to 0b11. While not an invalid setting, code should always set the
-        * bits to 0b01 to select sor1_brick.
-        */
-       "sor_safe", "sor1_brick", "sor1_src", "sor1_brick"
-};
-#define mux_sor_safe_sor1_brick_sor1_src_idx NULL
-
 static const char *mux_pllp_pllre_clkm[] = {
        "pll_p", "pll_re_out1", "clk_m"
 };
@@ -804,8 +790,6 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
        MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
        MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
-       MUX8_NOGATE_LOCK("sor1_src", mux_pllp_plld_plld2_clkm, CLK_SOURCE_SOR1, tegra_clk_sor1_src, &sor1_lock),
-       NODIV("sor1", mux_sor_safe_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 14, MASK(2), 183, 0, tegra_clk_sor1, &sor1_lock),
        MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
        MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
        I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
@@ -823,7 +807,8 @@ static struct tegra_periph_init_data gate_clks[] = {
        GATE("timer", "clk_m", 5, 0, tegra_clk_timer, CLK_IS_CRITICAL),
        GATE("isp", "clk_m", 23, 0, tegra_clk_isp, 0),
        GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0),
-       GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0),
+       GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
+       GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
        GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
        GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
        GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
@@ -927,10 +912,7 @@ static void __init periph_clk_init(void __iomem *clk_base,
                        continue;
 
                data->periph.gate.regs = bank;
-               clk = tegra_clk_register_periph(data->name,
-                       data->p.parent_names, data->num_parents,
-                       &data->periph, clk_base, data->offset,
-                       data->flags);
+               clk = tegra_clk_register_periph_data(clk_base, data);
                *dt_clk = clk;
        }
 }
index 4f6fd307cb706d2d38003203417a2a9adf70a92e..10047107c1dc39720275537b5dddb86db4438e6a 100644 (file)
@@ -166,7 +166,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
                                   clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
                                   &sysrate_lock);
        clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT |
-                               CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
+                               CLK_IS_CRITICAL, clk_base + SYSTEM_CLK_RATE,
                                3, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
        *dt_clk = clk;
 }
index fd1a99c05c2dc20214d575313fcf2a3964ac945f..63087d17c3e2c31f0e4e29127a7885c8a510f924 100644 (file)
@@ -1092,9 +1092,7 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
 
        for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
                data = &tegra_periph_clk_list[i];
-               clk = tegra_clk_register_periph(data->name,
-                       data->p.parent_names, data->num_parents,
-                       &data->periph, clk_base, data->offset, data->flags);
+               clk = tegra_clk_register_periph_data(clk_base, data);
                clks[data->clk_id] = clk;
        }
 
index ad1c1cc829cba46591ba458e3b637441b20f3638..269d3595758bebabf0f72d6448ba6633e9cd3c8f 100644 (file)
@@ -125,19 +125,17 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
                return err;
        }
 
-       platform_set_drvdata(pdev, soc);
-
        return 0;
 }
 
 static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
 {
-       struct tegra_dfll_soc_data *soc = platform_get_drvdata(pdev);
-       int err;
+       struct tegra_dfll_soc_data *soc;
 
-       err = tegra_dfll_unregister(pdev);
-       if (err < 0)
-               dev_err(&pdev->dev, "failed to unregister DFLL: %d\n", err);
+       soc = tegra_dfll_unregister(pdev);
+       if (IS_ERR(soc))
+               dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
+                       PTR_ERR(soc));
 
        tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
 
index 837e5cbd60e9ce842c97e87151b9782779ed81be..cbd5a2e5c569bc2b3471eb4154aa42ee6e083f97 100644 (file)
@@ -522,6 +522,8 @@ static struct tegra_devclk devclks[] __initdata = {
 };
 
 static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
+       [tegra_clk_ahbdma] = { .dt_id = TEGRA20_CLK_AHBDMA, .present = true },
+       [tegra_clk_apbdma] = { .dt_id = TEGRA20_CLK_APBDMA, .present = true },
        [tegra_clk_spdif_out] = { .dt_id = TEGRA20_CLK_SPDIF_OUT, .present = true },
        [tegra_clk_spdif_in] = { .dt_id = TEGRA20_CLK_SPDIF_IN, .present = true },
        [tegra_clk_sdmmc1] = { .dt_id = TEGRA20_CLK_SDMMC1, .present = true },
@@ -806,11 +808,6 @@ static void __init tegra20_periph_clk_init(void)
                                    clk_base, 0, 3, periph_clk_enb_refcnt);
        clks[TEGRA20_CLK_AC97] = clk;
 
-       /* apbdma */
-       clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
-                                   0, 34, periph_clk_enb_refcnt);
-       clks[TEGRA20_CLK_APBDMA] = clk;
-
        /* emc */
        clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
                               ARRAY_SIZE(mux_pllmcp_clkm),
@@ -850,9 +847,7 @@ static void __init tegra20_periph_clk_init(void)
 
        for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
                data = &tegra_periph_clk_list[i];
-               clk = tegra_clk_register_periph(data->name, data->p.parent_names,
-                               data->num_parents, &data->periph,
-                               clk_base, data->offset, data->flags);
+               clk = tegra_clk_register_periph_data(clk_base, data);
                clks[data->clk_id] = clk;
        }
 
@@ -1025,7 +1020,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1 },
        { TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1 },
        { TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1 },
-       { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 120000000, 1 },
+       { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
        { TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1 },
        { TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
        { TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1 },
index 6d7a613f2656a4f8af55e0905a0c8c0daec6c0dc..9e6260869eb94bee9e9b3c5f17dd7aedbc6c0a79 100644 (file)
@@ -40,6 +40,7 @@
 
 #define CLK_SOURCE_CSITE 0x1d4
 #define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_SOR1 0x410
 
 #define PLLC_BASE 0x80
 #define PLLC_OUT 0x84
@@ -264,6 +265,7 @@ static DEFINE_SPINLOCK(pll_d_lock);
 static DEFINE_SPINLOCK(pll_e_lock);
 static DEFINE_SPINLOCK(pll_re_lock);
 static DEFINE_SPINLOCK(pll_u_lock);
+static DEFINE_SPINLOCK(sor1_lock);
 static DEFINE_SPINLOCK(emc_lock);
 
 /* possible OSC frequencies in Hz */
@@ -2566,8 +2568,8 @@ static int tegra210_enable_pllu(void)
        reg |= PLL_ENABLE;
        writel(reg, clk_base + PLLU_BASE);
 
-       readl_relaxed_poll_timeout(clk_base + PLLU_BASE, reg,
-                                  reg & PLL_BASE_LOCK, 2, 1000);
+       readl_relaxed_poll_timeout_atomic(clk_base + PLLU_BASE, reg,
+                                         reg & PLL_BASE_LOCK, 2, 1000);
        if (!(reg & PLL_BASE_LOCK)) {
                pr_err("Timed out waiting for PLL_U to lock\n");
                return -ETIMEDOUT;
@@ -2628,10 +2630,35 @@ static int tegra210_init_pllu(void)
        return 0;
 }
 
+static const char * const sor1_out_parents[] = {
+       /*
+        * Bit 0 of the mux selects sor1_pad_clkout, irrespective of bit 1, so
+        * the sor1_pad_clkout parent appears twice in the list below. This is
+        * merely to support clk_get_parent() if firmware happened to set
+        * these bits to 0b11. While not an invalid setting, code should
+        * always set the bits to 0b01 to select sor1_pad_clkout.
+        */
+       "sor_safe", "sor1_pad_clkout", "sor1", "sor1_pad_clkout",
+};
+
+static const char * const sor1_parents[] = {
+       "pll_p", "pll_d_out0", "pll_d2_out0", "clk_m",
+};
+
+static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
+
+static struct tegra_periph_init_data tegra210_periph[] = {
+       TEGRA_INIT_DATA_TABLE("sor1", NULL, NULL, sor1_parents,
+                             CLK_SOURCE_SOR1, 29, 0x7, 0, 0, 8, 1,
+                             TEGRA_DIVIDER_ROUND_UP, 183, 0, tegra_clk_sor1,
+                             sor1_parents_idx, 0, &sor1_lock),
+};
+
 static __init void tegra210_periph_clk_init(void __iomem *clk_base,
                                            void __iomem *pmc_base)
 {
        struct clk *clk;
+       unsigned int i;
 
        /* xusb_ss_div2 */
        clk = clk_register_fixed_factor(NULL, "xusb_ss_div2", "xusb_ss_src", 0,
@@ -2650,6 +2677,12 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
                                              1, 17, 207);
        clks[TEGRA210_CLK_DPAUX1] = clk;
 
+       clk = clk_register_mux_table(NULL, "sor1_out", sor1_out_parents,
+                                    ARRAY_SIZE(sor1_out_parents), 0,
+                                    clk_base + CLK_SOURCE_SOR1, 14, 0x3,
+                                    0, NULL, &sor1_lock);
+       clks[TEGRA210_CLK_SOR1_OUT] = clk;
+
        /* pll_d_dsi_out */
        clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
                                clk_base + PLLD_MISC0, 21, 0, &pll_d_lock);
@@ -2694,6 +2727,20 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
                                0, NULL);
        clks[TEGRA210_CLK_ACLK] = clk;
 
+       for (i = 0; i < ARRAY_SIZE(tegra210_periph); i++) {
+               struct tegra_periph_init_data *init = &tegra210_periph[i];
+               struct clk **clkp;
+
+               clkp = tegra_lookup_dt_id(init->clk_id, tegra210_clks);
+               if (!clkp) {
+                       pr_warn("clock %u not found\n", init->clk_id);
+                       continue;
+               }
+
+               clk = tegra_clk_register_periph_data(clk_base, init);
+               *clkp = clk;
+       }
+
        tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params);
 }
 
index a2d163f759b4502df2ad4f1f4e5d738904da67a7..bee84c554932ce1f54f5f32eff0078f7b7f19868 100644 (file)
@@ -359,7 +359,7 @@ static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
 };
 
 /* PLL parameters */
-static struct tegra_clk_pll_params pll_c_params = {
+static struct tegra_clk_pll_params pll_c_params __ro_after_init = {
        .input_min = 2000000,
        .input_max = 31000000,
        .cf_min = 1000000,
@@ -388,7 +388,7 @@ static struct div_nmp pllm_nmp = {
        .override_divp_shift = 15,
 };
 
-static struct tegra_clk_pll_params pll_m_params = {
+static struct tegra_clk_pll_params pll_m_params __ro_after_init = {
        .input_min = 2000000,
        .input_max = 31000000,
        .cf_min = 1000000,
@@ -409,7 +409,7 @@ static struct tegra_clk_pll_params pll_m_params = {
                 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_FIXED,
 };
 
-static struct tegra_clk_pll_params pll_p_params = {
+static struct tegra_clk_pll_params pll_p_params __ro_after_init = {
        .input_min = 2000000,
        .input_max = 31000000,
        .cf_min = 1000000,
@@ -444,7 +444,7 @@ static struct tegra_clk_pll_params pll_a_params = {
                 TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
-static struct tegra_clk_pll_params pll_d_params = {
+static struct tegra_clk_pll_params pll_d_params __ro_after_init = {
        .input_min = 2000000,
        .input_max = 40000000,
        .cf_min = 1000000,
@@ -461,7 +461,7 @@ static struct tegra_clk_pll_params pll_d_params = {
                 TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
-static struct tegra_clk_pll_params pll_d2_params = {
+static struct tegra_clk_pll_params pll_d2_params __ro_after_init = {
        .input_min = 2000000,
        .input_max = 40000000,
        .cf_min = 1000000,
@@ -478,7 +478,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
                 TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
-static struct tegra_clk_pll_params pll_u_params = {
+static struct tegra_clk_pll_params pll_u_params __ro_after_init = {
        .input_min = 2000000,
        .input_max = 40000000,
        .cf_min = 1000000,
@@ -496,7 +496,7 @@ static struct tegra_clk_pll_params pll_u_params = {
                 TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
-static struct tegra_clk_pll_params pll_x_params = {
+static struct tegra_clk_pll_params pll_x_params __ro_after_init = {
        .input_min = 2000000,
        .input_max = 31000000,
        .cf_min = 1000000,
@@ -513,7 +513,7 @@ static struct tegra_clk_pll_params pll_x_params = {
                 TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
-static struct tegra_clk_pll_params pll_e_params = {
+static struct tegra_clk_pll_params pll_e_params __ro_after_init = {
        .input_min = 12000000,
        .input_max = 216000000,
        .cf_min = 12000000,
@@ -788,6 +788,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
        [tegra_clk_extern3] = { .dt_id = TEGRA30_CLK_EXTERN3, .present = true },
        [tegra_clk_disp1] = { .dt_id = TEGRA30_CLK_DISP1, .present = true },
        [tegra_clk_disp2] = { .dt_id = TEGRA30_CLK_DISP2, .present = true },
+       [tegra_clk_ahbdma] = { .dt_id = TEGRA30_CLK_AHBDMA, .present = true },
        [tegra_clk_apbdma] = { .dt_id = TEGRA30_CLK_APBDMA, .present = true },
        [tegra_clk_rtc] = { .dt_id = TEGRA30_CLK_RTC, .present = true },
        [tegra_clk_timer] = { .dt_id = TEGRA30_CLK_TIMER, .present = true },
@@ -964,7 +965,7 @@ static void __init tegra30_super_clk_init(void)
         * U71 divider of cclk_lp.
         */
        clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
-                               clk_base + SUPER_CCLKG_DIVIDER, 0,
+                               clk_base + SUPER_CCLKLP_DIVIDER, 0,
                                TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
        clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
 
@@ -1079,9 +1080,7 @@ static void __init tegra30_periph_clk_init(void)
 
        for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
                data = &tegra_periph_clk_list[i];
-               clk = tegra_clk_register_periph(data->name, data->p.parent_names,
-                               data->num_parents, &data->periph,
-                               clk_base, data->offset, data->flags);
+               clk = tegra_clk_register_periph_data(clk_base, data);
                clks[data->clk_id] = clk;
        }
 
index 872f1189ad7fbc71d3635d7abf28a16092bf6646..3b2763df51c2e0f7c5a5113f8df68dbbc4cb9b21 100644 (file)
@@ -662,6 +662,9 @@ struct tegra_periph_init_data {
                        _clk_num, _gate_flags, _clk_id,\
                        NULL, 0, NULL)
 
+struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
+                                          struct tegra_periph_init_data *init);
+
 /**
  * struct clk_super_mux - super clock
  *
index 13eb04f72389bbf68e26a12f71b74188910e8ab8..14881547043130d1e686055387a6276e49fd11f9 100644 (file)
@@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
 
                /* Get configuration for the ATL instances */
                snprintf(prop, sizeof(prop), "atl%u", i);
-               of_node_get(node);
-               cfg_node = of_find_node_by_name(node, prop);
+               cfg_node = of_get_child_by_name(node, prop);
                if (cfg_node) {
                        ret = of_property_read_u32(cfg_node, "bws",
                                                   &cdesc->bws);
index 88f04a4cb890be4f68025d04d4bfaec4597dfda7..77f93f6d2806a19662b3f2c94128ee491cfc4fe2 100644 (file)
@@ -292,10 +292,8 @@ static struct clk *_register_divider(struct device *dev, const char *name,
 
        /* allocate the divider */
        div = kzalloc(sizeof(*div), GFP_KERNEL);
-       if (!div) {
-               pr_err("%s: could not allocate divider clk\n", __func__);
+       if (!div)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &ti_clk_divider_ops;
index 18c267b38461dc96ff1d5dd26f518136a342ffb3..d4705803f3d3c7f72cfdc9f969bde508c4a1197a 100644 (file)
@@ -108,10 +108,8 @@ static struct clk *_register_mux(struct device *dev, const char *name,
 
        /* allocate the mux */
        mux = kzalloc(sizeof(*mux), GFP_KERNEL);
-       if (!mux) {
-               pr_err("%s: could not allocate mux clk\n", __func__);
+       if (!mux)
                return ERR_PTR(-ENOMEM);
-       }
 
        init.name = name;
        init.ops = &ti_clk_mux_ops;
index 16e4d303f53593f78124d2c2dee9c771613dc7eb..badc478a86c6781e332163c03b87d37b97c71dec 100644 (file)
@@ -13,6 +13,8 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/stddef.h>
+
 #include "clk-uniphier.h"
 
 #define UNIPHIER_MIO_CLK_SD_FIXED                                      \
 #define UNIPHIER_MIO_CLK_USB2_PHY(idx, ch)                             \
        UNIPHIER_CLK_GATE("usb2" #ch "-phy", (idx), "usb2", 0x20 + 0x200 * (ch), 29)
 
-#define UNIPHIER_MIO_CLK_DMAC(idx)                                     \
-       UNIPHIER_CLK_GATE("miodmac", (idx), "stdmac", 0x20, 25)
-
 const struct uniphier_clk_data uniphier_ld4_mio_clk_data[] = {
        UNIPHIER_MIO_CLK_SD_FIXED,
        UNIPHIER_MIO_CLK_SD(0, 0),
        UNIPHIER_MIO_CLK_SD(1, 1),
        UNIPHIER_MIO_CLK_SD(2, 2),
-       UNIPHIER_MIO_CLK_DMAC(7),
+       UNIPHIER_CLK_GATE("miodmac", 7, NULL, 0x20, 25),
        UNIPHIER_MIO_CLK_USB2(8, 0),
        UNIPHIER_MIO_CLK_USB2(9, 1),
        UNIPHIER_MIO_CLK_USB2(10, 2),
index 07f3b91a7daf36f3f15873626312c68707709484..d244e724e19858dfb8b8c5297e9852a61ba0ce50 100644 (file)
@@ -123,7 +123,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
 const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
        UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1),         /* 2400 MHz */
        UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1),       /* 2560 MHz */
-       UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125),     /* 2949.12 MHz */
+       UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125),  /* 2949.12 MHz */
        UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
        UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
        UNIPHIER_PRO5_SYS_CLK_NAND(2),
index f50592775c9d792cf21ced429ff10012ade2a937..7cfb59c9136d4169f4fc149a87d64e47f2a19389 100644 (file)
@@ -107,11 +107,9 @@ static struct clk *clk_reg_prcc(const char *name,
                return ERR_PTR(-EINVAL);
        }
 
-       clk = kzalloc(sizeof(struct clk_prcc), GFP_KERNEL);
-       if (!clk) {
-               pr_err("clk_prcc: %s could not allocate clk\n", __func__);
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk)
                return ERR_PTR(-ENOMEM);
-       }
 
        clk->base = ioremap(phy_base, SZ_4K);
        if (!clk->base)
index 6e3e16b2e5caa947543855538cdf064f85293578..9d1f2d4550ad92cfbd2058a1bb53b5ddd81662c0 100644 (file)
@@ -258,11 +258,9 @@ static struct clk *clk_reg_prcmu(const char *name,
                return ERR_PTR(-EINVAL);
        }
 
-       clk = kzalloc(sizeof(struct clk_prcmu), GFP_KERNEL);
-       if (!clk) {
-               pr_err("clk_prcmu: %s could not allocate clk\n", __func__);
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk)
                return ERR_PTR(-ENOMEM);
-       }
 
        clk->cg_sel = cg_sel;
        clk->is_prepared = 1;
index 8a4e93ce1e42cad595e8d8fa439e77f1ad67a2e0..7c0403b733ae4c5b236b58f3ea440cf0a2204a1c 100644 (file)
@@ -139,11 +139,9 @@ static struct clk *clk_reg_sysctrl(struct device *dev,
                return ERR_PTR(-EINVAL);
        }
 
-       clk = devm_kzalloc(dev, sizeof(struct clk_sysctrl), GFP_KERNEL);
-       if (!clk) {
-               dev_err(dev, "clk_sysctrl: could not allocate clk\n");
+       clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
+       if (!clk)
                return ERR_PTR(-ENOMEM);
-       }
 
        /* set main clock registers */
        clk->reg_sel[0] = reg_sel[0];
index 09fbe66f1f11cb41507f104a7458ed38d83e75cd..dafe7a45875d9ef7e27bde705396e6c0d3cbe045 100644 (file)
@@ -359,16 +359,13 @@ static struct clk *icst_clk_setup(struct device *dev,
        struct clk_init_data init;
        struct icst_params *pclone;
 
-       icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL);
-       if (!icst) {
-               pr_err("could not allocate ICST clock!\n");
+       icst = kzalloc(sizeof(*icst), GFP_KERNEL);
+       if (!icst)
                return ERR_PTR(-ENOMEM);
-       }
 
        pclone = kmemdup(desc->params, sizeof(*pclone), GFP_KERNEL);
        if (!pclone) {
                kfree(icst);
-               pr_err("could not clone ICST params\n");
                return ERR_PTR(-ENOMEM);
        }
 
index 7c64a5c1bfc13bb536bdefc1f3dab53273e315fb..a31990408153d13e33211363e2d4e3442f8b9d8e 100644 (file)
@@ -177,7 +177,14 @@ out_fail:
        return ret;
 }
 
-void timer_of_exit(struct timer_of *to)
+/**
+ * timer_of_cleanup - release timer_of resources
+ * @to: timer_of structure
+ *
+ * Release the resources that have been used in timer_of_init().
+ * This function should be called in init error cases.
+ */
+void __init timer_of_cleanup(struct timer_of *to)
 {
        if (to->flags & TIMER_OF_IRQ)
                timer_irq_exit(&to->of_irq);
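The rename from timer_of_exit() to timer_of_cleanup(), together with the new __init annotation, makes the intended use explicit: it only unwinds a timer_of_init() that has to be rolled back during early init; it is not a runtime teardown path. A hedged sketch of that call pattern; the extra flag values and the follow-up registration helper are assumptions, only TIMER_OF_IRQ and the timer_of_init()/timer_of_cleanup() signatures appear in this diff:

static struct timer_of my_to = {
        .flags = TIMER_OF_IRQ,          /* other TIMER_OF_* flags as needed */
        /* .of_irq / .clkevt fields would be filled in here */
};

static int __init my_timer_init(struct device_node *np)
{
        int ret;

        ret = timer_of_init(np, &my_to);
        if (ret)
                return ret;

        ret = my_register_clockevent(&my_to);   /* hypothetical follow-up step */
        if (ret)
                timer_of_cleanup(&my_to);       /* release what timer_of_init() took */

        return ret;
}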
index 43f5ba3f8979d9005c09247f5cb87583bef773bf..3f708f1be43d8671359ab5b9d0bc7934b94aec5b 100644 (file)
@@ -68,6 +68,6 @@ static inline unsigned long timer_of_period(struct timer_of *to)
 extern int __init timer_of_init(struct device_node *np,
                                struct timer_of *to);
 
-extern void timer_of_exit(struct timer_of *to);
+extern void __init timer_of_cleanup(struct timer_of *to);
 
 #endif
index 4ebae43118effe98f4763618cbd0777060e8e134..d8addbce40bcc4f9c6a29e32c98cd0c15bac15b4 100644 (file)
@@ -275,6 +275,7 @@ config BMIPS_CPUFREQ
 
 config LOONGSON2_CPUFREQ
        tristate "Loongson2 CPUFreq Driver"
+       depends on LEMOTE_MACH2F
        help
          This option adds a CPUFreq driver for loongson processors which
          support software configurable cpu frequency.
@@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ
 
 config LOONGSON1_CPUFREQ
        tristate "Loongson1 CPUFreq Driver"
+       depends on LOONGSON1_LS1B
        help
          This option adds a CPUFreq driver for loongson1 processors which
          support software configurable cpu frequency.
index 18c4bd9a5c6564776c7ac5f35e259daae5662f48..e0d5090b303dd3840ddb2a53d2481d6ba6bacf50 100644 (file)
@@ -620,3 +620,7 @@ static int __init mtk_cpufreq_driver_init(void)
        return 0;
 }
 device_initcall(mtk_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("MediaTek CPUFreq driver");
+MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
+MODULE_LICENSE("GPL v2");
index 6833ada237ab7d94540671d811f1dbcde2bb59db..7b0bf825c4e73c588ff93183cf5315665d69e082 100644 (file)
@@ -428,9 +428,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
        return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
 
+static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       struct file *filp = vma->vm_file;
+       struct dev_dax *dev_dax = filp->private_data;
+       struct dax_region *dax_region = dev_dax->region;
+
+       if (!IS_ALIGNED(addr, dax_region->align))
+               return -EINVAL;
+       return 0;
+}
+
 static const struct vm_operations_struct dax_vm_ops = {
        .fault = dev_dax_fault,
        .huge_fault = dev_dax_huge_fault,
+       .split = dev_dax_split,
 };
 
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
index 9a302799040e4529bc5d08ce7c17343421ea19b6..5d101c4053e05f2fddb8c656d543afb62fecd05b 100644 (file)
@@ -27,7 +27,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/dma_fence.h>
 
-EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
 
index 56cf825ed7799c26d6d5086703cc0c67f43a2442..f3f4f810e5df39b82a68e71305541b391393dbad 100644 (file)
@@ -220,7 +220,7 @@ out_free_cpus:
        return err;
 }
 
-static void dummy_callback(unsigned long ignored) {}
+static void dummy_callback(struct timer_list *unused) {}
 
 static int suspend_cpu(int index, bool broadcast)
 {
@@ -287,7 +287,7 @@ static int suspend_test_thread(void *arg)
        pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
                cpu, drv->state_count - 1);
 
-       setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
+       timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
        for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
                int index;
                /*
index 68b2033bc30e2bd3425e395462faf5d1bfc50670..dfbd894d5bb712d0df01fc37cc99dc199e095de0 100644 (file)
@@ -585,6 +585,13 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
        return ret ? : le32_to_cpu(scm_ret);
 }
 
+int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+                         size_t mem_sz, phys_addr_t src, size_t src_sz,
+                         phys_addr_t dest, size_t dest_sz)
+{
+       return -ENODEV;
+}
+
 int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
                               u32 spare)
 {
index 3fea6f563ca91f17b380f2a5a138900ddfc0c141..688525dd4aee599548c52502168b3f04f62388fb 100644 (file)
@@ -382,6 +382,33 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
        return ret ? : res.a1;
 }
 
+int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+                         size_t mem_sz, phys_addr_t src, size_t src_sz,
+                         phys_addr_t dest, size_t dest_sz)
+{
+       int ret;
+       struct qcom_scm_desc desc = {0};
+       struct arm_smccc_res res;
+
+       desc.args[0] = mem_region;
+       desc.args[1] = mem_sz;
+       desc.args[2] = src;
+       desc.args[3] = src_sz;
+       desc.args[4] = dest;
+       desc.args[5] = dest_sz;
+       desc.args[6] = 0;
+
+       desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+                                    QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+                                    QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+       ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
+                           QCOM_MEM_PROT_ASSIGN_ID,
+                           &desc, &res);
+
+       return ret ? : res.a1;
+}
+
 int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
 {
        struct qcom_scm_desc desc = {0};
index 9064e559a01f5050737de43d49df4f94df01b5a2..af4c75217ea6647955ff28585b1e512b20eb258f 100644 (file)
@@ -47,6 +47,19 @@ struct qcom_scm {
        u64 dload_mode_addr;
 };
 
+struct qcom_scm_current_perm_info {
+       __le32 vmid;
+       __le32 perm;
+       __le64 ctx;
+       __le32 ctx_size;
+       __le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+       __le64 mem_addr;
+       __le64 mem_size;
+};
+
 static struct qcom_scm *__scm;
 
 static int qcom_scm_clk_enable(void)
@@ -415,6 +428,88 @@ int qcom_scm_set_remote_state(u32 state, u32 id)
 }
 EXPORT_SYMBOL(qcom_scm_set_remote_state);
 
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership need to be reassigned
+ * @mem_sz:   size of the region.
+ * @srcvm:    vmid for current set of owners, each set bit in
+ *            the flag indicates a unique owner
+ * @newvm:    array of new owners and corresponding permission
+ *            flags
+ * @dest_cnt: number of owners in next set.
+ *
+ * Return negative errno on failure, 0 on success, with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+                       unsigned int *srcvm,
+                       struct qcom_scm_vmperm *newvm, int dest_cnt)
+{
+       struct qcom_scm_current_perm_info *destvm;
+       struct qcom_scm_mem_map_info *mem_to_map;
+       phys_addr_t mem_to_map_phys;
+       phys_addr_t dest_phys;
+       phys_addr_t ptr_phys;
+       size_t mem_to_map_sz;
+       size_t dest_sz;
+       size_t src_sz;
+       size_t ptr_sz;
+       int next_vm;
+       __le32 *src;
+       void *ptr;
+       int ret;
+       int len;
+       int i;
+
+       src_sz = hweight_long(*srcvm) * sizeof(*src);
+       mem_to_map_sz = sizeof(*mem_to_map);
+       dest_sz = dest_cnt * sizeof(*destvm);
+       ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+                       ALIGN(dest_sz, SZ_64);
+
+       ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       /* Fill source vmid detail */
+       src = ptr;
+       len = hweight_long(*srcvm);
+       for (i = 0; i < len; i++) {
+               src[i] = cpu_to_le32(ffs(*srcvm) - 1);
+               *srcvm ^= 1 << (ffs(*srcvm) - 1);
+       }
+
+       /* Fill details of mem buff to map */
+       mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+       mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+       mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
+       mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+
+       next_vm = 0;
+       /* Fill details of the next set of vmids */
+       destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+       dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+       for (i = 0; i < dest_cnt; i++) {
+               destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
+               destvm[i].perm = cpu_to_le32(newvm[i].perm);
+               destvm[i].ctx = 0;
+               destvm[i].ctx_size = 0;
+               next_vm |= BIT(newvm[i].vmid);
+       }
+
+       ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+                                   ptr_phys, src_sz, dest_phys, dest_sz);
+       dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+       if (ret) {
+               dev_err(__scm->dev,
+                       "Assign memory protection call failed %d.\n", ret);
+               return -EINVAL;
+       }
+
+       *srcvm = next_vm;
+       return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
+
 static int qcom_scm_probe(struct platform_device *pdev)
 {
        struct qcom_scm *scm;
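For context, qcom_scm_assign_mem() packs three regions into one DMA-coherent buffer (the source vmid list, the memory map entry and the destination permission list, each 64-byte aligned) and hands their physical addresses to the secure world. A hedged sketch of a caller transferring a buffer from the current owner to one new VM; the VMID and permission constants are placeholders, only the signature and struct qcom_scm_vmperm's vmid/perm fields are taken from this diff:

#include <linux/qcom_scm.h>

static int hand_off_buffer(phys_addr_t buf_phys, size_t buf_size)
{
        struct qcom_scm_vmperm newvm[] = {
                { .vmid = REMOTE_VMID, .perm = REMOTE_PERM_RW },  /* placeholders */
        };
        unsigned int srcvm = BIT(CURRENT_VMID);  /* bitmap of current owners */
        int ret;

        ret = qcom_scm_assign_mem(buf_phys, buf_size, &srcvm,
                                  newvm, ARRAY_SIZE(newvm));
        if (ret)
                return ret;

        /* on success, srcvm now holds the new owner bitmap */
        return 0;
}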
index 83f171c239430312169596054682c10043c9651f..dcd7f7917fc71a5547f87577682cbca2ec3da078 100644 (file)
@@ -103,5 +103,10 @@ extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
                                             size_t *size);
 extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
                                             u32 size, u32 spare);
+#define QCOM_MEM_PROT_ASSIGN_ID        0x16
+extern int  __qcom_scm_assign_mem(struct device *dev,
+                                 phys_addr_t mem_region, size_t mem_sz,
+                                 phys_addr_t src, size_t src_sz,
+                                 phys_addr_t dest, size_t dest_sz);
 
 #endif
index c21adf60a7f200ba6a4faaa32c163157d00da9ce..057e1ecd83cec5746319adb88602eb95111104c6 100644 (file)
@@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
                return false;
        }
 
-       tmp = bios[0x18] | (bios[0x19] << 8);
-       if (bios[tmp + 0x14] != 0x0) {
-               DRM_INFO("Not an x86 BIOS ROM\n");
-               return false;
-       }
-
        bios_header_start = bios[0x48] | (bios[0x49] << 8);
        if (!bios_header_start) {
                DRM_INFO("Can't locate bios header\n");
index 6c78623e13863c6773d9dd4f7c4444de045f9f2b..a57cec737c18ab1b8db405607042a4363493205b 100644 (file)
@@ -1495,8 +1495,11 @@ out:
        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);
        wait->out.first_signaled = first;
-       /* set return value 0 to indicate success */
-       r = array[first]->error;
+
+       if (first < fence_count && array[first])
+               r = array[first]->error;
+       else
+               r = 0;
 
 err_free_fence_array:
        for (i = 0; i < fence_count; i++)
index 2d792cdc094cd60e86542c0ed5380c3590198fc2..2c574374d9b6884e6c4473f2dd3ede86b7a612df 100644 (file)
@@ -1837,6 +1837,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                adev->ip_blocks[i].status.hw = false;
        }
 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+               amdgpu_ucode_fini_bo(adev);
+
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
@@ -3261,9 +3264,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
        pm_pg_lock = (*pos >> 23) & 1;
 
        if (*pos & (1ULL << 62)) {
-               se_bank = (*pos >> 24) & 0x3FF;
-               sh_bank = (*pos >> 34) & 0x3FF;
-               instance_bank = (*pos >> 44) & 0x3FF;
+               se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+               sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+               instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
 
                if (se_bank == 0x3FF)
                        se_bank = 0xFFFFFFFF;
@@ -3337,9 +3340,9 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
        pm_pg_lock = (*pos >> 23) & 1;
 
        if (*pos & (1ULL << 62)) {
-               se_bank = (*pos >> 24) & 0x3FF;
-               sh_bank = (*pos >> 34) & 0x3FF;
-               instance_bank = (*pos >> 44) & 0x3FF;
+               se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+               sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+               instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
 
                if (se_bank == 0x3FF)
                        se_bank = 0xFFFFFFFF;
@@ -3687,12 +3690,12 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
                return -EINVAL;
 
        /* decode offset */
-       offset = (*pos & 0x7F);
-       se = ((*pos >> 7) & 0xFF);
-       sh = ((*pos >> 15) & 0xFF);
-       cu = ((*pos >> 23) & 0xFF);
-       wave = ((*pos >> 31) & 0xFF);
-       simd = ((*pos >> 37) & 0xFF);
+       offset = (*pos & GENMASK_ULL(6, 0));
+       se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+       sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+       cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+       wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+       simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
 
        /* switch to the specific se/sh/cu */
        mutex_lock(&adev->grbm_idx_mutex);
@@ -3737,14 +3740,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
                return -EINVAL;
 
        /* decode offset */
-       offset = (*pos & 0xFFF);       /* in dwords */
-       se = ((*pos >> 12) & 0xFF);
-       sh = ((*pos >> 20) & 0xFF);
-       cu = ((*pos >> 28) & 0xFF);
-       wave = ((*pos >> 36) & 0xFF);
-       simd = ((*pos >> 44) & 0xFF);
-       thread = ((*pos >> 52) & 0xFF);
-       bank = ((*pos >> 60) & 1);
+       offset = *pos & GENMASK_ULL(11, 0);
+       se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+       sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+       cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+       wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+       simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+       thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+       bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
        data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
        if (!data)
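The debugfs hunks above replace open-coded shift-and-mask pairs with GENMASK_ULL(), which states the field boundaries directly instead of encoding the width in a magic mask. Both spellings decode the same bits; a small self-contained check for the 10-bit SE bank field at bits 24..33 of the file offset, assuming nothing beyond GENMASK_ULL() from <linux/bitops.h>:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bug.h>

static u32 decode_se_bank(u64 pos)
{
        u32 old_style = (pos >> 24) & 0x3FF;
        u32 new_style = (pos & GENMASK_ULL(33, 24)) >> 24;

        WARN_ON(old_style != new_style);        /* identical by construction */
        return new_style;
}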
index bd5b8065c32e86fcf4e71c25779cffee2939d203..2fa95aef74d5200449c94a0215f04be60e8cb184 100644 (file)
@@ -268,9 +268,10 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
  *
  * Checks for fence activity.
  */
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
 {
-       struct amdgpu_ring *ring = (void *)arg;
+       struct amdgpu_ring *ring = from_timer(ring, t,
+                                             fence_drv.fallback_timer);
 
        amdgpu_fence_process(ring);
 }
@@ -422,8 +423,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;
 
-       setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
-                   (unsigned long)ring);
+       timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
 
        ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
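The amdgpu fallback-timer change is one instance of the tree-wide timer API conversion that recurs throughout this diff (psci_checker, drm vblank, exynos vidi, tda998x, msm, rockchip PSR, omapdrm DSI): callbacks now take a struct timer_list * and recover their container with from_timer(), and setup_timer()/init_timer() become timer_setup(). A condensed sketch of the pattern with generic names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ctx {
        struct timer_list timer;
        bool expired;
};

static void my_timeout(struct timer_list *t)
{
        /* map the timer back to the object that embeds it */
        struct my_ctx *ctx = from_timer(ctx, t, timer);

        ctx->expired = true;
}

static void my_start(struct my_ctx *ctx)
{
        /* replaces setup_timer(&ctx->timer, my_timeout, (unsigned long)ctx) */
        timer_setup(&ctx->timer, my_timeout, 0);
        mod_timer(&ctx->timer, jiffies + HZ);
}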
index a418df1b942274579e4da7a767e0138d95c38c90..e87eedcc0da9d5363d7742782281683d6bb842dd 100644 (file)
@@ -63,6 +63,11 @@ retry:
                             flags, NULL, resv, 0, &bo);
        if (r) {
                if (r != -ERESTARTSYS) {
+                       if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+                               flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                               goto retry;
+                       }
+
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
@@ -323,7 +328,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
                                                 bo->tbo.ttm->pages);
                if (r)
-                       goto unlock_mmap_sem;
+                       goto release_object;
 
                r = amdgpu_bo_reserve(bo, true);
                if (r)
@@ -348,9 +353,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 free_pages:
        release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
 
-unlock_mmap_sem:
-       up_read(&current->mm->mmap_sem);
-
 release_object:
        drm_gem_object_put_unlocked(gobj);
 
@@ -556,9 +558,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
-                       "va_address 0x%lX is in reserved area 0x%X\n",
-                       (unsigned long)args->va_address,
-                       AMDGPU_VA_RESERVED_SIZE);
+                       "va_address 0x%LX is in reserved area 0x%LX\n",
+                       args->va_address, AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }
 
index 33535d3477343bab044dba5c1cb171ed6befa475..00e0ce10862f7275655710c2559b2286dacb136b 100644 (file)
@@ -71,12 +71,6 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 {
        struct amdgpu_gtt_mgr *mgr = man->priv;
 
-       spin_lock(&mgr->lock);
-       if (!drm_mm_clean(&mgr->mm)) {
-               spin_unlock(&mgr->lock);
-               return -EBUSY;
-       }
-
        drm_mm_takedown(&mgr->mm);
        spin_unlock(&mgr->lock);
        kfree(mgr);
index d6df5728df7fca04648daa9677bc03d8dafd830f..6c570d4e4516488b81cd385353b8d02c7e428912 100644 (file)
@@ -946,6 +946,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        umode_t effective_mode = attr->mode;
 
+       /* no skipping for powerplay */
+       if (adev->powerplay.cgs_device)
+               return effective_mode;
+
        /* Skip limit attributes if DPM is not enabled */
        if (!adev->pm.dpm_enabled &&
            (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
index 5f5aa5fddc169355077a4e61665563c087d860f5..033fba2def6f775b35f4f840f7032477c4403ad3 100644 (file)
@@ -164,9 +164,6 @@ static int amdgpu_pp_hw_fini(void *handle)
                ret = adev->powerplay.ip_funcs->hw_fini(
                                        adev->powerplay.pp_handle);
 
-       if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-               amdgpu_ucode_fini_bo(adev);
-
        return ret;
 }
 
index 90af8e82b16af3f97990b7e71bb672de4b6d0b72..ae9c106979d7de51ce3ec1027593d80d7a897e34 100644 (file)
@@ -169,10 +169,14 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        int flags)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+       struct dma_buf *buf;
 
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
            bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
                return ERR_PTR(-EPERM);
 
-       return drm_gem_prime_export(dev, gobj, flags);
+       buf = drm_gem_prime_export(dev, gobj, flags);
+       if (!IS_ERR(buf))
+               buf->file->f_mapping = dev->anon_inode->i_mapping;
+       return buf;
 }
index 447d446b50150d475cb9a01945706b17bbfc2e78..7714f4a6c8b000072c2b7c3d8691884b3d902a35 100644 (file)
@@ -442,8 +442,6 @@ static int psp_hw_fini(void *handle)
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;
 
-       amdgpu_ucode_fini_bo(adev);
-
        psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
index aa914256b4bc75d98015ced2bbe413799f8d3466..bae77353447b793fbcb9ed9b710d923524d4f4f1 100644 (file)
@@ -94,7 +94,8 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_MMHUB                           1
 
 /* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE                        (8 << 20)
+#define AMDGPU_VA_RESERVED_SIZE                        (8ULL << 20)
+
 /* max vmids dedicated for process */
 #define AMDGPU_VM_MAX_RESERVED_VMID    1
 
index 26e90062797173d772b4c71c2f3229ebb1bfbbbc..4acca92f6a52da27b460cfb28bef9a05c571acfa 100644 (file)
@@ -68,11 +68,6 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
        struct amdgpu_vram_mgr *mgr = man->priv;
 
        spin_lock(&mgr->lock);
-       if (!drm_mm_clean(&mgr->mm)) {
-               spin_unlock(&mgr->lock);
-               return -EBUSY;
-       }
-
        drm_mm_takedown(&mgr->mm);
        spin_unlock(&mgr->lock);
        kfree(mgr);
index 00868764a0dd2cfedbc0317a3f1b7e6216d88424..5c8a7a48a4adb16834ab5893e2341c03a5899d7d 100644 (file)
@@ -4670,6 +4670,14 @@ static int gfx_v7_0_sw_fini(void *handle)
        gfx_v7_0_cp_compute_fini(adev);
        gfx_v7_0_rlc_fini(adev);
        gfx_v7_0_mec_fini(adev);
+       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+                               &adev->gfx.rlc.clear_state_gpu_addr,
+                               (void **)&adev->gfx.rlc.cs_ptr);
+       if (adev->gfx.rlc.cp_table_size) {
+               amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+                               &adev->gfx.rlc.cp_table_gpu_addr,
+                               (void **)&adev->gfx.rlc.cp_table_ptr);
+       }
        gfx_v7_0_free_microcode(adev);
 
        return 0;
index b8002ac3e53691d159050159cb03c5b0ec009e61..9ecdf621a74a14994e9f657d161e8aa9c5cc3a52 100644 (file)
@@ -2118,6 +2118,15 @@ static int gfx_v8_0_sw_fini(void *handle)
 
        gfx_v8_0_mec_fini(adev);
        gfx_v8_0_rlc_fini(adev);
+       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+                               &adev->gfx.rlc.clear_state_gpu_addr,
+                               (void **)&adev->gfx.rlc.cs_ptr);
+       if ((adev->asic_type == CHIP_CARRIZO) ||
+           (adev->asic_type == CHIP_STONEY)) {
+               amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+                               &adev->gfx.rlc.cp_table_gpu_addr,
+                               (void **)&adev->gfx.rlc.cp_table_ptr);
+       }
        gfx_v8_0_free_microcode(adev);
 
        return 0;
index 7f15bb2c5233566b771afc111ac17a5cbfe4ccc8..da43813d67a4ad56ddecb79ac0a749afe29abc43 100644 (file)
@@ -207,6 +207,12 @@ static const u32 golden_settings_gc_9_1_rv1[] =
        SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
 };
 
+static const u32 golden_settings_gc_9_x_common[] =
+{
+       SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
+       SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+};
+
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
 
@@ -242,6 +248,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
        default:
                break;
        }
+
+       amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+                                       (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
 }
 
 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
@@ -988,12 +997,22 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
                start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
 }
 
+static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+                                    uint32_t wave, uint32_t thread,
+                                    uint32_t start, uint32_t size,
+                                    uint32_t *dst)
+{
+       wave_read_regs(
+               adev, simd, wave, thread,
+               start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
+}
 
 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v9_0_select_se_sh,
        .read_wave_data = &gfx_v9_0_read_wave_data,
        .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+       .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
 };
 
 static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1449,6 +1468,14 @@ static int gfx_v9_0_sw_fini(void *handle)
 
        gfx_v9_0_mec_fini(adev);
        gfx_v9_0_ngg_fini(adev);
+       amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+                               &adev->gfx.rlc.clear_state_gpu_addr,
+                               (void **)&adev->gfx.rlc.cs_ptr);
+       if (adev->asic_type == CHIP_RAVEN) {
+               amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+                               &adev->gfx.rlc.cp_table_gpu_addr,
+                               (void **)&adev->gfx.rlc.cp_table_ptr);
+       }
        gfx_v9_0_free_microcode(adev);
 
        return 0;
index 621699331e090d745194a3ea0fb5407d5140ae65..c8f1aebeac7a90d934f0793535c9e5fa656916e6 100644 (file)
@@ -392,7 +392,16 @@ static int gmc_v9_0_early_init(void *handle)
 static int gmc_v9_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+       /*
+        * The latest engine allocation on gfx9 is:
+        * Engine 0, 1: idle
+        * Engine 2, 3: firmware
+        * Engine 4~13: amdgpu rings, subject to change when the ring count changes
+        * Engine 14~15: idle
+        * Engine 16: kfd tlb invalidation
+        * Engine 17: Gart flushes
+        */
+       unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
        unsigned i;
 
        for(i = 0; i < adev->num_rings; ++i) {
@@ -405,9 +414,9 @@ static int gmc_v9_0_late_init(void *handle)
                         ring->funcs->vmhub);
        }
 
-       /* Engine 17 is used for GART flushes */
+       /* Engine 16 is used for KFD and 17 for GART flushes */
        for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
-               BUG_ON(vm_inv_eng[i] > 17);
+               BUG_ON(vm_inv_eng[i] > 16);
 
        return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 }
index a129bc5b18442a3b20743506f852f7e97938dd74..c6febbf0bf693354e5af19fecd18e4047b82cc80 100644 (file)
@@ -1486,7 +1486,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
                                if (vddci_id_buf[i] == virtual_voltage_id) {
                                        for (j = 0; j < profile->ucLeakageBinNum; j++) {
                                                if (efuse_voltage_id <= leakage_bin[j]) {
-                                                       *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i];
+                                                       *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
                                                        break;
                                                }
                                        }
index d1af1483c69bafe427f4835c55a4bea93f31a66c..a651ebcf44fdccbca6e90ed31bcbd62bdbbca38c 100644 (file)
@@ -830,9 +830,9 @@ static int init_over_drive_limits(
                const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
 {
        hwmgr->platform_descriptor.overdriveLimit.engineClock =
-               le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+               le32_to_cpu(powerplay_table->ulMaxODEngineClock);
        hwmgr->platform_descriptor.overdriveLimit.memoryClock =
-               le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+               le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
 
        hwmgr->platform_descriptor.minOverdriveVDDC = 0;
        hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
index 4466469cf8ab816d8d5e0eca6a4ec29358e6cfe4..e33ec7fc5d09a11a0eb654564158edbaf2433f50 100644 (file)
@@ -3778,7 +3778,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Trying to Unfreeze MCLK DPM when DPM is disabled",
                                );
                PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+                               PPSMC_MSG_MCLKDPM_UnfreezeLevel),
                    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
                    return -EINVAL);
        }
index 4f79c21f27ed451451280156afbe9e0ec0c72409..f8d838c2c8eea5f0e86c692510b86d5cd318a1b2 100644 (file)
@@ -753,6 +753,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        uint32_t config_telemetry = 0;
        struct pp_atomfwctrl_voltage_table vol_table;
        struct cgs_system_info sys_info = {0};
+       uint32_t reg;
 
        data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
        if (data == NULL)
@@ -859,6 +860,16 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                        advanceFanControlParameters.usFanPWMMinLimit *
                        hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
 
+       reg = soc15_get_register_offset(DF_HWID, 0,
+                       mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
+                       mmDF_CS_AON0_DramBaseAddress0);
+       data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+                       DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
+                       DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+       PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
+                       "Mem Channel Index Exceeded maximum!",
+                       return -EINVAL);
+
        return result;
 }
 
@@ -1777,7 +1788,7 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
        struct vega10_single_dpm_table *dpm_table =
                        &(data->dpm_table.mem_table);
        int result = 0;
-       uint32_t i, j, reg, mem_channels;
+       uint32_t i, j;
 
        for (i = 0; i < dpm_table->count; i++) {
                result = vega10_populate_single_memory_level(hwmgr,
@@ -1801,20 +1812,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
                i++;
        }
 
-       reg = soc15_get_register_offset(DF_HWID, 0,
-                       mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
-                       mmDF_CS_AON0_DramBaseAddress0);
-       mem_channels = (cgs_read_register(hwmgr->device, reg) &
-                       DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
-                       DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
-       PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
-                       "Mem Channel Index Exceeded maximum!",
-                       return -1);
-
-       pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
+       pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
        pp_table->MemoryChannelWidth =
-                       cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
-                                       channel_number[mem_channels]);
+                       (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
+                                       channel_number[data->mem_channels]);
 
        pp_table->LowestUclkReservedForUlv =
                        (uint8_t)(data->lowest_uclk_reserved_for_ulv);
index b4b461c3b8ee88b1e3a6fededc8b5b127cd3305e..8f7358cc3327b779a8000d052da6e6e79d622ac3 100644 (file)
@@ -389,6 +389,7 @@ struct vega10_hwmgr {
        uint32_t                       config_telemetry;
        uint32_t                       smu_version;
        uint32_t                       acg_loop_state;
+       uint32_t                       mem_channels;
 };
 
 #define VEGA10_DPM2_NEAR_TDP_DEC                      10
index 704fc893461629d71955e513f61c9e34c2f2a057..25f4b2e9a44fcdd5cef668df014001e1c1b1c90e 100644 (file)
@@ -234,6 +234,10 @@ int drm_connector_init(struct drm_device *dev,
                                   config->link_status_property,
                                   0);
 
+       drm_object_attach_property(&connector->base,
+                                  config->non_desktop_property,
+                                  0);
+
        if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
                drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
        }
@@ -763,6 +767,10 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
  *      value of link-status is "GOOD". If something fails during or after modeset,
  *      the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers
  *      should update this value using drm_mode_connector_set_link_status_property().
+ * non_desktop:
+ *     Indicates the output should be ignored for purposes of displaying a
+ *     standard desktop environment or console. This is most likely because
+ *     the output device is not rectilinear.
  *
  * Connectors also have one standardized atomic property:
  *
@@ -811,6 +819,11 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
                return -ENOMEM;
        dev->mode_config.link_status_property = prop;
 
+       prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop");
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.non_desktop_property = prop;
+
        return 0;
 }
 
@@ -1194,6 +1207,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
        if (edid)
                size = EDID_LENGTH * (1 + edid->extensions);
 
+       drm_object_property_set_value(&connector->base,
+                                     dev->mode_config.non_desktop_property,
+                                     connector->display_info.non_desktop);
+
        ret = drm_property_replace_global_blob(dev,
                                               &connector->edid_blob_ptr,
                                               size,
index 00ddabfbf980401f8d58a4d46eff8ee60282ee5f..5dfe147638716730573d008edc65abfc92f6d75a 100644 (file)
@@ -82,6 +82,8 @@
 #define EDID_QUIRK_FORCE_6BPC                  (1 << 10)
 /* Force 10bpc */
 #define EDID_QUIRK_FORCE_10BPC                 (1 << 11)
+/* Non desktop display (i.e. HMD) */
+#define EDID_QUIRK_NON_DESKTOP                 (1 << 12)
 
 struct detailed_mode_closure {
        struct drm_connector *connector;
@@ -157,6 +159,9 @@ static const struct edid_quirk {
 
        /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
        { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+
+       /* HTC Vive VR Headset */
+       { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
 };
 
 /*
@@ -4393,7 +4398,7 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
 }
 
 static void drm_add_display_info(struct drm_connector *connector,
-                                struct edid *edid)
+                                struct edid *edid, u32 quirks)
 {
        struct drm_display_info *info = &connector->display_info;
 
@@ -4407,6 +4412,8 @@ static void drm_add_display_info(struct drm_connector *connector,
        info->max_tmds_clock = 0;
        info->dvi_dual = false;
 
+       info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+
        if (edid->revision < 3)
                return;
 
@@ -4627,7 +4634,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
         * To avoid multiple parsing of same block, lets parse that map
         * from sink info, before parsing CEA modes.
         */
-       drm_add_display_info(connector, edid);
+       drm_add_display_info(connector, edid, quirks);
 
        /*
         * EDID spec says modes should be preferred in this order:
@@ -4824,7 +4831,8 @@ void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
                                   const struct drm_display_mode *mode,
                                   enum hdmi_quantization_range rgb_quant_range,
-                                  bool rgb_quant_range_selectable)
+                                  bool rgb_quant_range_selectable,
+                                  bool is_hdmi2_sink)
 {
        /*
         * CEA-861:
@@ -4848,8 +4856,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
         *  YQ-field to match the RGB Quantization Range being transmitted
         *  (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
         *  set YQ=1) and the Sink shall ignore the YQ-field."
+        *
+        * Unfortunately, certain sinks (e.g. VIZ Model 67/E261VA) get confused
+        * by non-zero YQ when receiving RGB. There doesn't seem to be any
+        * good way to tell which version of CEA-861 the sink supports, so
+        * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
+        * on CEA-861-F.
         */
-       if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+       if (!is_hdmi2_sink ||
+           rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
                frame->ycc_quantization_range =
                        HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
        else
index 116d1f1337c7e36da1622ffc88bfc6e218d461a3..07374008f146fa946750b1cfd3bf13bfd1b9c47d 100644 (file)
@@ -2033,6 +2033,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
 {
        bool enable;
 
+       if (connector->display_info.non_desktop)
+               return false;
+
        if (strict)
                enable = connector->status == connector_status_connected;
        else
@@ -2052,7 +2055,8 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
                connector = fb_helper->connector_info[i]->connector;
                enabled[i] = drm_connector_enabled(connector, true);
                DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
-                         enabled[i] ? "yes" : "no");
+                             connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+
                any_enabled |= enabled[i];
        }
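The new immutable "non-desktop" connector property (documented in the drm_connector.c hunk earlier in this diff) is also aimed at userspace: a compositor can skip HMD-style outputs the same way the fbdev helper above does. A hedged userspace sketch using libdrm; it assumes the DRM file descriptor and connector id are already known:

#include <stdint.h>
#include <string.h>
#include <xf86drmMode.h>

/* returns 1 if the connector carries a non-zero "non-desktop" property */
static int connector_is_non_desktop(int fd, uint32_t connector_id)
{
        drmModeObjectPropertiesPtr props;
        uint32_t i;
        int ret = 0;

        props = drmModeObjectGetProperties(fd, connector_id,
                                           DRM_MODE_OBJECT_CONNECTOR);
        if (!props)
                return 0;

        for (i = 0; i < props->count_props; i++) {
                drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

                if (prop && !strcmp(prop->name, "non-desktop"))
                        ret = props->prop_values[i] != 0;
                drmModeFreeProperty(prop);
        }

        drmModeFreeObjectProperties(props);
        return ret;
}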
 
index 19404e34cd592d4a19720efa8b64d3fc4854569f..37a93cdffb4ad0e7986a634df4d70ccc3fef286e 100644 (file)
@@ -1030,6 +1030,7 @@ retry:
                e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
                e->event.base.length = sizeof(e->event);
                e->event.vbl.user_data = page_flip->user_data;
+               e->event.vbl.crtc_id = crtc->base.id;
                ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
                if (ret) {
                        kfree(e);
index 09c1c4ff93ca4c00948b3104970a628b81a06893..3717b3df34a41fdc2170af50802490d4ee63ad20 100644 (file)
@@ -367,9 +367,9 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
        spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
 
-static void vblank_disable_fn(unsigned long arg)
+static void vblank_disable_fn(struct timer_list *t)
 {
-       struct drm_vblank_crtc *vblank = (void *)arg;
+       struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
        struct drm_device *dev = vblank->dev;
        unsigned int pipe = vblank->pipe;
        unsigned long irqflags;
@@ -436,8 +436,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
                vblank->dev = dev;
                vblank->pipe = i;
                init_waitqueue_head(&vblank->queue);
-               setup_timer(&vblank->disable_timer, vblank_disable_fn,
-                           (unsigned long)vblank);
+               timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
                seqlock_init(&vblank->seqlock);
        }
 
@@ -1019,7 +1018,7 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
                if (drm_vblank_offdelay == 0)
                        return;
                else if (drm_vblank_offdelay < 0)
-                       vblank_disable_fn((unsigned long)vblank);
+                       vblank_disable_fn(&vblank->disable_timer);
                else if (!dev->vblank_disable_immediate)
                        mod_timer(&vblank->disable_timer,
                                  jiffies + ((drm_vblank_offdelay * HZ)/1000));
@@ -1650,7 +1649,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
        spin_unlock_irqrestore(&dev->event_lock, irqflags);
 
        if (disable_irq)
-               vblank_disable_fn((unsigned long)vblank);
+               vblank_disable_fn(&vblank->disable_timer);
 
        return true;
 }
index 53e03f8af3d5ecd4dda4ec649a4e17017cc693c4..e6b0940b1ac273f95a12c9a747fd4097b1e3cd1c 100644 (file)
@@ -161,9 +161,9 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
        .atomic_flush = exynos_crtc_handle_event,
 };
 
-static void vidi_fake_vblank_timer(unsigned long arg)
+static void vidi_fake_vblank_timer(struct timer_list *t)
 {
-       struct vidi_context *ctx = (void *)arg;
+       struct vidi_context *ctx = from_timer(ctx, t, timer);
 
        if (drm_crtc_handle_vblank(&ctx->crtc->base))
                mod_timer(&ctx->timer,
@@ -449,7 +449,7 @@ static int vidi_probe(struct platform_device *pdev)
 
        ctx->pdev = pdev;
 
-       setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
+       timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
 
        mutex_init(&ctx->lock);
 
index 58e9e0601a616b86bc910dc606ea7229c79fa52b..faf17b83b910df4f38202a1a453f9bd48ac82fd7 100644 (file)
@@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
                return PTR_ERR(fsl_dev->state);
        }
 
-       clk_disable_unprepare(fsl_dev->pix_clk);
        clk_disable_unprepare(fsl_dev->clk);
 
        return 0;
@@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        if (fsl_dev->tcon)
                fsl_tcon_bypass_enable(fsl_dev->tcon);
        fsl_dcu_drm_init_planes(fsl_dev->drm);
+       enable_irq(fsl_dev->irq);
        drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
 
        console_lock();
@@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        console_unlock();
 
        drm_kms_helper_poll_enable(fsl_dev->drm);
-       enable_irq(fsl_dev->irq);
 
        return 0;
 }
index edd7d8127d194d87440d721fcce53026fd319592..c54806d08dd78d0080ef42a314fe5ac2db40d7b6 100644 (file)
@@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
 {
        struct drm_encoder *encoder = &fsl_dev->encoder;
        struct drm_connector *connector = &fsl_dev->connector.base;
-       struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
        int ret;
 
        fsl_dev->connector.encoder = encoder;
@@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
        if (ret < 0)
                goto err_sysfs;
 
-       drm_object_property_set_value(&connector->base,
-                                     mode_config->dpms_property,
-                                     DRM_MODE_DPMS_OFF);
-
        ret = drm_panel_attach(panel, connector);
        if (ret) {
                dev_err(fsl_dev->dev, "failed to attach panel\n");
index 4d1f45acf2cdbb0be6b0a9a53d55f6af6fbb28c5..127815253a84522b2e7c59f1f1af175a56b46158 100644 (file)
@@ -601,9 +601,9 @@ tda998x_reset(struct tda998x_priv *priv)
  * we have seen a HPD inactive->active transition.  This code implements
  * that delay.
  */
-static void tda998x_edid_delay_done(unsigned long data)
+static void tda998x_edid_delay_done(struct timer_list *t)
 {
-       struct tda998x_priv *priv = (struct tda998x_priv *)data;
+       struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
 
        priv->edid_delay_active = false;
        wake_up(&priv->edid_delay_waitq);
@@ -1492,8 +1492,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
        mutex_init(&priv->mutex);       /* protect the page access */
        init_waitqueue_head(&priv->edid_delay_waitq);
-       setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
-                   (unsigned long)priv);
+       timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
        INIT_WORK(&priv->detect_work, tda998x_detect_work);
 
        /* wake up the device: */
index 701a3c6f16696f9615ff52d7d81b2589d31bb99f..85d4c57870fb7a2c577803d12e3c0bf219cf056f 100644 (file)
@@ -1628,7 +1628,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
        struct intel_shadow_bb_entry *entry_obj;
        struct intel_vgpu *vgpu = s->vgpu;
        unsigned long gma = 0;
-       uint32_t bb_size;
+       int bb_size;
        void *dst = NULL;
        int ret = 0;
 
index 960d3d8b95b8e5d647b85b2baed7b2c5b68d83cf..2cf10d17acfbf3a3c6c6af30afb5f5a01d6103ad 100644 (file)
@@ -1714,6 +1714,7 @@ static int i915_drm_resume(struct drm_device *dev)
        intel_guc_resume(dev_priv);
 
        intel_modeset_init_hw(dev);
+       intel_init_clock_gating(dev_priv);
 
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
@@ -2618,6 +2619,8 @@ static int intel_runtime_resume(struct device *kdev)
                ret = vlv_resume_prepare(dev_priv, true);
        }
 
+       intel_uncore_runtime_resume(dev_priv);
+
        /*
         * No point of rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
index 135fc750a8375f172e130c6b45b85747535693d9..382a77a1097e735468558415d3fa860afbb8c818 100644 (file)
@@ -172,7 +172,9 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT_CACHED;
-       mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+       mn->wq = alloc_workqueue("i915-userptr-release",
+                                WQ_UNBOUND | WQ_MEM_RECLAIM,
+                                0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
@@ -827,7 +829,7 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 
        dev_priv->mm.userptr_wq =
                alloc_workqueue("i915-userptr-acquire",
-                               WQ_HIGHPRI | WQ_MEM_RECLAIM,
+                               WQ_HIGHPRI | WQ_UNBOUND,
                                0);
        if (!dev_priv->mm.userptr_wq)
                return -ENOMEM;
index 48e1ba01ccf88dba097a9e62e06c77907f01a9ce..5f8b9f1f40f19e84968c18e5fbd229731b392dea 100644 (file)
@@ -517,6 +517,7 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
 
        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);
+       RB_CLEAR_NODE(&wait->node);
 
 out:
        GEM_BUG_ON(b->irq_wait == wait);
index 5132dc8147884f9ace0af2615f7047f63b15a9a2..4dea833f9d1b78c17239eeade593a72c88fbc166 100644 (file)
@@ -487,7 +487,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                                           crtc_state->limited_color_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL,
-                                          intel_hdmi->rgb_quant_range_selectable);
+                                          intel_hdmi->rgb_quant_range_selectable,
+                                          is_hdmi2_sink);
 
        /* TODO: handle pixel repetition for YCBCR420 outputs */
        intel_write_infoframe(encoder, crtc_state, &frame);
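Because drm_hdmi_avi_infoframe_quant_range() gained an is_hdmi2_sink argument (see the YQ discussion in the drm_edid.c hunk above), every caller now has to say whether the sink is known to be HDMI 2.0 capable; only then is a non-zero YQ value emitted. A hedged caller sketch, with the HDMI 2.0 detection heuristic marked as an assumption and the infoframe write-out elided:

static void fill_avi_infoframe(struct drm_connector *connector,
                               const struct drm_display_mode *mode,
                               bool limited_range,
                               bool rgb_quant_range_selectable)
{
        struct hdmi_avi_infoframe frame;
        /* assumption: SCDC support used here as a stand-in for "HDMI 2.0 sink" */
        bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;

        drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, is_hdmi2_sink);
        drm_hdmi_avi_infoframe_quant_range(&frame, mode,
                                           limited_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL,
                                           rgb_quant_range_selectable,
                                           is_hdmi2_sink);
        /* ... write the frame out via the encoder's infoframe hook ... */
}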
index 20e3c65c0999f88ee68c09c3aa13533bbe0b1a5c..8c2ce81f01c2e922c168e98c3fdf5d2c504e0e40 100644 (file)
@@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
        i915_check_and_clear_faults(dev_priv);
 }
 
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+       iosf_mbi_register_pmic_bus_access_notifier(
+               &dev_priv->uncore.pmic_bus_access_nb);
+}
+
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 {
        i915_modparams.enable_rc6 =
@@ -1240,8 +1246,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
                 * bus, which will be busy after this notification, leading to:
                 * "render: timed out waiting for forcewake ack request."
                 * errors.
+                *
+                * The notifier is unregistered during intel_runtime_suspend(),
+                * so it's ok to access the HW here without holding a RPM
+                * wake reference -> disable wakeref asserts for the time of
+                * the access.
                 */
+               disable_rpm_wakeref_asserts(dev_priv);
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+               enable_rpm_wakeref_asserts(dev_priv);
                break;
        case MBI_PMIC_BUS_ACCESS_END:
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
index 582771251b57a28122f98a8092bfd1a3211c0590..9ce079b5dd0d85d55e0aeca2b45ce54c5070bcb9 100644 (file)
@@ -134,6 +134,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
 void intel_uncore_fini(struct drm_i915_private *dev_priv);
 void intel_uncore_suspend(struct drm_i915_private *dev_priv);
 void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
index 3790fdf44a1ab7feb4429c76d8524ea50926864b..b26f07b55d861c04b45dbad8d56c4ea9cd414540 100644 (file)
@@ -49,9 +49,9 @@ void onstack_fence_fini(struct i915_sw_fence *fence)
        i915_sw_fence_fini(fence);
 }
 
-static void timed_fence_wake(unsigned long data)
+static void timed_fence_wake(struct timer_list *t)
 {
-       struct timed_fence *tf = (struct timed_fence *)data;
+       struct timed_fence *tf = from_timer(tf, t, timer);
 
        i915_sw_fence_commit(&tf->fence);
 }
@@ -60,7 +60,7 @@ void timed_fence_init(struct timed_fence *tf, unsigned long expires)
 {
        onstack_fence_init(&tf->fence);
 
-       setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+       timer_setup_on_stack(&tf->timer, timed_fence_wake, 0);
 
        if (time_after(expires, jiffies))
                mod_timer(&tf->timer, expires);
index 53e0b24beda6e0e2ee44c57550d5c752393b5863..9a9961802f5c39ce7270217903c550b2f01ed92d 100644 (file)
@@ -115,7 +115,7 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc)
 
        if (crtc->state) {
                if (crtc->state->mode_blob)
-                       drm_property_unreference_blob(crtc->state->mode_blob);
+                       drm_property_blob_put(crtc->state->mode_blob);
 
                state = to_imx_crtc_state(crtc->state);
                memset(state, 0, sizeof(*state));
index 8def97d75030c8c49a64ef63764769bb7d44bf42..aedecda9728a9847d1762cd1e35d4c6fe19c8991 100644 (file)
@@ -183,7 +183,7 @@ static int imx_pd_register(struct drm_device *drm,
                                &imx_pd_connector_helper_funcs);
                drm_connector_init(drm, &imxpd->connector,
                                   &imx_pd_connector_funcs,
-                                  DRM_MODE_CONNECTOR_VGA);
+                                  DRM_MODE_CONNECTOR_DPI);
        }
 
        if (imxpd->panel)
index 40f4840ef98e8273c327b0f350025fa9885f5890..970c7963ae29bfd781a01ceac976abf5fe24764f 100644 (file)
@@ -82,9 +82,9 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
        return NULL;
 }
 
-static void a5xx_preempt_timer(unsigned long data)
+static void a5xx_preempt_timer(struct timer_list *t)
 {
-       struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+       struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
        struct msm_gpu *gpu = &a5xx_gpu->base.base;
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
@@ -300,6 +300,5 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
                }
        }
 
-       setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
-               (unsigned long) a5xx_gpu);
+       timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
 }
index 8d4477818ec216124952c142189ba4c5a9fa47d2..2322014034398110d879d6c97f7d2e5fe25c68fe 100644 (file)
@@ -353,9 +353,9 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
                        round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
 }
 
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
 {
-       struct msm_gpu *gpu = (struct msm_gpu *)data;
+       struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
@@ -703,8 +703,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        INIT_WORK(&gpu->recover_work, recover_worker);
 
 
-       setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
-                       (unsigned long)gpu);
+       timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
 
        spin_lock_init(&gpu->perf_lock);
 
index b56a05730314ffe0257d157ea27d44859829fb40..c2cf6d98e577bdf7f367042cde89ae6fd196f133 100644 (file)
@@ -4095,7 +4095,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
 }
 
 #ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
 {
        DSSERR("TE not received for 250ms!\n");
 }
@@ -5449,9 +5449,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
                             dsi_framedone_timeout_work_callback);
 
 #ifdef DSI_CATCH_MISSING_TE
-       init_timer(&dsi->te_timer);
-       dsi->te_timer.function = dsi_te_timeout;
-       dsi->te_timer.data = 0;
+       timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
 #endif
 
        dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
index 2fcf805d3a169e9776634fc4f5ddfcbd4b9fbee7..33b821d6d018f37ba1ccf9cf86be81be0f4e11da 100644 (file)
@@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
        }
 
        info->par = rfbdev;
-       info->skip_vt_switch = true;
 
        ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
        if (ret) {
index a553e182ff538b69a212ce121185d10a573966ab..3acfd576b7df894ec9dcc278fc7e02165e606d99 100644 (file)
@@ -101,9 +101,9 @@ static void psr_set_state(struct psr_drv *psr, enum psr_state state)
        spin_unlock_irqrestore(&psr->lock, flags);
 }
 
-static void psr_flush_handler(unsigned long data)
+static void psr_flush_handler(struct timer_list *t)
 {
-       struct psr_drv *psr = (struct psr_drv *)data;
+       struct psr_drv *psr = from_timer(psr, t, flush_timer);
        unsigned long flags;
 
        /* If the state has changed since we initiated the flush, do nothing */
@@ -232,7 +232,7 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
        if (!psr)
                return -ENOMEM;
 
-       setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+       timer_setup(&psr->flush_timer, psr_flush_handler, 0);
        spin_lock_init(&psr->lock);
 
        psr->active = true;
index 4bcacd3f48613d416deea7a32cff7f58c97709b3..b0a1dedac8026e0ad89ee1227cb7b7881d3fee7d 100644 (file)
@@ -174,9 +174,9 @@ struct tegra_sor {
 
        struct reset_control *rst;
        struct clk *clk_parent;
-       struct clk *clk_brick;
        struct clk *clk_safe;
-       struct clk *clk_src;
+       struct clk *clk_out;
+       struct clk *clk_pad;
        struct clk *clk_dp;
        struct clk *clk;
 
@@ -255,7 +255,7 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
 
        clk_disable_unprepare(sor->clk);
 
-       err = clk_set_parent(sor->clk, parent);
+       err = clk_set_parent(sor->clk_out, parent);
        if (err < 0)
                return err;
 
@@ -266,24 +266,24 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
        return 0;
 }
 
-struct tegra_clk_sor_brick {
+struct tegra_clk_sor_pad {
        struct clk_hw hw;
        struct tegra_sor *sor;
 };
 
-static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
 {
-       return container_of(hw, struct tegra_clk_sor_brick, hw);
+       return container_of(hw, struct tegra_clk_sor_pad, hw);
 }
 
-static const char * const tegra_clk_sor_brick_parents[] = {
+static const char * const tegra_clk_sor_pad_parents[] = {
        "pll_d2_out0", "pll_dp"
 };
 
-static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
 {
-       struct tegra_clk_sor_brick *brick = to_brick(hw);
-       struct tegra_sor *sor = brick->sor;
+       struct tegra_clk_sor_pad *pad = to_pad(hw);
+       struct tegra_sor *sor = pad->sor;
        u32 value;
 
        value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
@@ -304,10 +304,10 @@ static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
        return 0;
 }
 
-static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
 {
-       struct tegra_clk_sor_brick *brick = to_brick(hw);
-       struct tegra_sor *sor = brick->sor;
+       struct tegra_clk_sor_pad *pad = to_pad(hw);
+       struct tegra_sor *sor = pad->sor;
        u8 parent = U8_MAX;
        u32 value;
 
@@ -328,33 +328,33 @@ static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
        return parent;
 }
 
-static const struct clk_ops tegra_clk_sor_brick_ops = {
-       .set_parent = tegra_clk_sor_brick_set_parent,
-       .get_parent = tegra_clk_sor_brick_get_parent,
+static const struct clk_ops tegra_clk_sor_pad_ops = {
+       .set_parent = tegra_clk_sor_pad_set_parent,
+       .get_parent = tegra_clk_sor_pad_get_parent,
 };
 
-static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
-                                               const char *name)
+static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
+                                             const char *name)
 {
-       struct tegra_clk_sor_brick *brick;
+       struct tegra_clk_sor_pad *pad;
        struct clk_init_data init;
        struct clk *clk;
 
-       brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
-       if (!brick)
+       pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL);
+       if (!pad)
                return ERR_PTR(-ENOMEM);
 
-       brick->sor = sor;
+       pad->sor = sor;
 
        init.name = name;
        init.flags = 0;
-       init.parent_names = tegra_clk_sor_brick_parents;
-       init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
-       init.ops = &tegra_clk_sor_brick_ops;
+       init.parent_names = tegra_clk_sor_pad_parents;
+       init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+       init.ops = &tegra_clk_sor_pad_ops;
 
-       brick->hw.init = &init;
+       pad->hw.init = &init;
 
-       clk = devm_clk_register(sor->dev, &brick->hw);
+       clk = devm_clk_register(sor->dev, &pad->hw);
 
        return clk;
 }
@@ -998,8 +998,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
 
        /* switch to safe parent clock */
        err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
-       if (err < 0)
+       if (err < 0) {
                dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+               return err;
+       }
 
        value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
        value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
@@ -2007,8 +2009,10 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
 
        /* switch to safe parent clock */
        err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
-       if (err < 0)
+       if (err < 0) {
                dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+               return;
+       }
 
        div = clk_get_rate(sor->clk) / 1000000 * 4;
 
@@ -2111,13 +2115,17 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
 
        /* switch to parent clock */
-       err = clk_set_parent(sor->clk_src, sor->clk_parent);
-       if (err < 0)
-               dev_err(sor->dev, "failed to set source clock: %d\n", err);
-
-       err = tegra_sor_set_parent_clock(sor, sor->clk_src);
-       if (err < 0)
+       err = clk_set_parent(sor->clk, sor->clk_parent);
+       if (err < 0) {
                dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+               return;
+       }
+
+       err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+       if (err < 0) {
+               dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+               return;
+       }
 
        value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
 
@@ -2628,11 +2636,24 @@ static int tegra_sor_probe(struct platform_device *pdev)
        }
 
        if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
-               sor->clk_src = devm_clk_get(&pdev->dev, "source");
-               if (IS_ERR(sor->clk_src)) {
-                       err = PTR_ERR(sor->clk_src);
-                       dev_err(sor->dev, "failed to get source clock: %d\n",
-                               err);
+               struct device_node *np = pdev->dev.of_node;
+               const char *name;
+
+               /*
+                * For backwards compatibility with Tegra210 device trees,
+                * fall back to the old clock name "source" if the new "out"
+                * clock is not available.
+                */
+               if (of_property_match_string(np, "clock-names", "out") < 0)
+                       name = "source";
+               else
+                       name = "out";
+
+               sor->clk_out = devm_clk_get(&pdev->dev, name);
+               if (IS_ERR(sor->clk_out)) {
+                       err = PTR_ERR(sor->clk_out);
+                       dev_err(sor->dev, "failed to get %s clock: %d\n",
+                               name, err);
                        goto remove;
                }
        }
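
The probe hunk above keeps older Tegra210 device trees working: it asks of_property_match_string() whether the new "out" clock name is present in clock-names and falls back to the legacy "source" name if not. The same test can be factored into a small helper; the helper below is only an illustration, not part of the driver:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/of.h>

/* Look up a clock by its current name, falling back to a legacy name
 * for device trees that predate the rename. Illustrative only. */
static struct clk *example_devm_clk_get_compat(struct device *dev,
					       const char *new_name,
					       const char *legacy_name)
{
	const char *name = new_name;

	/* of_property_match_string() returns a negative errno when absent */
	if (of_property_match_string(dev->of_node, "clock-names", new_name) < 0)
		name = legacy_name;

	return devm_clk_get(dev, name);
}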
@@ -2658,16 +2679,60 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
+       /*
+        * Starting with Tegra186, the BPMP provides an implementation for
+        * the pad output clock, so we have to look it up from device tree.
+        */
+       sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
+       if (IS_ERR(sor->clk_pad)) {
+               if (sor->clk_pad != ERR_PTR(-ENOENT)) {
+                       err = PTR_ERR(sor->clk_pad);
+                       goto remove;
+               }
+
+               /*
+                * If the pad output clock is not available, then we assume
+                * we're on Tegra210 or earlier and have to provide our own
+                * implementation.
+                */
+               sor->clk_pad = NULL;
+       }
+
+       /*
+        * The bootloader may have set up the SOR such that its module clock
+        * is sourced by one of the display PLLs. However, that doesn't work
+        * without properly having set up other bits of the SOR.
+        */
+       err = clk_set_parent(sor->clk_out, sor->clk_safe);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to use safe clock: %d\n", err);
+               goto remove;
+       }
+
        platform_set_drvdata(pdev, sor);
        pm_runtime_enable(&pdev->dev);
 
-       pm_runtime_get_sync(&pdev->dev);
-       sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
-       pm_runtime_put(&pdev->dev);
+       /*
+        * On Tegra210 and earlier, provide our own implementation for the
+        * pad output clock.
+        */
+       if (!sor->clk_pad) {
+               err = pm_runtime_get_sync(&pdev->dev);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
+                               err);
+                       goto remove;
+               }
+
+               sor->clk_pad = tegra_clk_sor_pad_register(sor,
+                                                         "sor1_pad_clkout");
+               pm_runtime_put(&pdev->dev);
+       }
 
-       if (IS_ERR(sor->clk_brick)) {
-               err = PTR_ERR(sor->clk_brick);
-               dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+       if (IS_ERR(sor->clk_pad)) {
+               err = PTR_ERR(sor->clk_pad);
+               dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
+                       err);
                goto remove;
        }
 
index 28fed7e206d030abb23eb1141a7eac672fc702a7..81ac82455ce4d649aadfc99eb85e53a37f74f683 100644 (file)
@@ -12,14 +12,3 @@ config DRM_TILCDC
          controller, for example AM33xx in beagle-bone, DA8xx, or
          OMAP-L1xx.  This driver replaces the FB_DA8XX fbdev driver.
 
-config DRM_TILCDC_SLAVE_COMPAT
-       bool "Support device tree blobs using TI LCDC Slave binding"
-       depends on DRM_TILCDC
-       default y
-       select OF_RESOLVE
-       select OF_OVERLAY
-       help
-         Choose this option if you need a kernel that is compatible
-         with device tree blobs using the obsolete "ti,tilcdc,slave"
-         binding. If you find "ti,tilcdc,slave"-string from your DTB,
-         you probably need this. Otherwise you do not.
index b9e1108e5b4e81e8c6a9926361abff0c5226248a..87f9480e43b05f0fa707f2e84c3fe6f5774fb56d 100644 (file)
@@ -3,9 +3,6 @@ ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
        ccflags-y += -Werror
 endif
 
-obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
-                                        tilcdc_slave_compat.dtb.o
-
 tilcdc-y := \
        tilcdc_plane.o \
        tilcdc_crtc.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
deleted file mode 100644 (file)
index d2b9e5f..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-/*
- * To support the old "ti,tilcdc,slave" binding the binding has to be
- * transformed to the new external encoder binding.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/of_fdt.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-#include "tilcdc_slave_compat.h"
-
-struct kfree_table {
-       int total;
-       int num;
-       void **table;
-};
-
-static int __init kfree_table_init(struct kfree_table *kft)
-{
-       kft->total = 32;
-       kft->num = 0;
-       kft->table = kmalloc(kft->total * sizeof(*kft->table),
-                            GFP_KERNEL);
-       if (!kft->table)
-               return -ENOMEM;
-
-       return 0;
-}
-
-static int __init kfree_table_add(struct kfree_table *kft, void *p)
-{
-       if (kft->num == kft->total) {
-               void **old = kft->table;
-
-               kft->total *= 2;
-               kft->table = krealloc(old, kft->total * sizeof(*kft->table),
-                                     GFP_KERNEL);
-               if (!kft->table) {
-                       kft->table = old;
-                       kfree(p);
-                       return -ENOMEM;
-               }
-       }
-       kft->table[kft->num++] = p;
-       return 0;
-}
-
-static void __init kfree_table_free(struct kfree_table *kft)
-{
-       int i;
-
-       for (i = 0; i < kft->num; i++)
-               kfree(kft->table[i]);
-
-       kfree(kft->table);
-}
-
-static
-struct property * __init tilcdc_prop_dup(const struct property *prop,
-                                        struct kfree_table *kft)
-{
-       struct property *nprop;
-
-       nprop = kzalloc(sizeof(*nprop), GFP_KERNEL);
-       if (!nprop || kfree_table_add(kft, nprop))
-               return NULL;
-
-       nprop->name = kstrdup(prop->name, GFP_KERNEL);
-       if (!nprop->name || kfree_table_add(kft, nprop->name))
-               return NULL;
-
-       nprop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
-       if (!nprop->value || kfree_table_add(kft, nprop->value))
-               return NULL;
-
-       nprop->length = prop->length;
-
-       return nprop;
-}
-
-static void __init tilcdc_copy_props(struct device_node *from,
-                                    struct device_node *to,
-                                    const char * const props[],
-                                    struct kfree_table *kft)
-{
-       struct property *prop;
-       int i;
-
-       for (i = 0; props[i]; i++) {
-               prop = of_find_property(from, props[i], NULL);
-               if (!prop)
-                       continue;
-
-               prop = tilcdc_prop_dup(prop, kft);
-               if (!prop)
-                       continue;
-
-               prop->next = to->properties;
-               to->properties = prop;
-       }
-}
-
-static int __init tilcdc_prop_str_update(struct property *prop,
-                                         const char *str,
-                                         struct kfree_table *kft)
-{
-       prop->value = kstrdup(str, GFP_KERNEL);
-       if (kfree_table_add(kft, prop->value) || !prop->value)
-               return -ENOMEM;
-       prop->length = strlen(str)+1;
-       return 0;
-}
-
-static void __init tilcdc_node_disable(struct device_node *node)
-{
-       struct property *prop;
-
-       prop = kzalloc(sizeof(*prop), GFP_KERNEL);
-       if (!prop)
-               return;
-
-       prop->name = "status";
-       prop->value = "disabled";
-       prop->length = strlen((char *)prop->value)+1;
-
-       of_update_property(node, prop);
-}
-
-static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
-{
-       const int size = __dtb_tilcdc_slave_compat_end -
-               __dtb_tilcdc_slave_compat_begin;
-       static void *overlay_data;
-       struct device_node *overlay;
-
-       if (!size) {
-               pr_warn("%s: No overlay data\n", __func__);
-               return NULL;
-       }
-
-       overlay_data = kmemdup(__dtb_tilcdc_slave_compat_begin,
-                              size, GFP_KERNEL);
-       if (!overlay_data || kfree_table_add(kft, overlay_data))
-               return NULL;
-
-       of_fdt_unflatten_tree(overlay_data, NULL, &overlay);
-       if (!overlay) {
-               pr_warn("%s: Unfattening overlay tree failed\n", __func__);
-               return NULL;
-       }
-
-       return overlay;
-}
-
-static const struct of_device_id tilcdc_slave_of_match[] __initconst = {
-       { .compatible = "ti,tilcdc,slave", },
-       {},
-};
-
-static const struct of_device_id tilcdc_of_match[] __initconst = {
-       { .compatible = "ti,am33xx-tilcdc", },
-       {},
-};
-
-static const struct of_device_id tilcdc_tda998x_of_match[] __initconst = {
-       { .compatible = "nxp,tda998x", },
-       {},
-};
-
-static const char * const tilcdc_slave_props[] __initconst = {
-       "pinctrl-names",
-       "pinctrl-0",
-       "pinctrl-1",
-       NULL
-};
-
-static void __init tilcdc_convert_slave_node(void)
-{
-       struct device_node *slave = NULL, *lcdc = NULL;
-       struct device_node *i2c = NULL, *fragment = NULL;
-       struct device_node *overlay, *encoder;
-       struct property *prop;
-       /* For all memory needed for the overlay tree. This memory can
-          be freed after the overlay has been applied. */
-       struct kfree_table kft;
-       int ovcs_id, ret;
-
-       if (kfree_table_init(&kft))
-               return;
-
-       lcdc = of_find_matching_node(NULL, tilcdc_of_match);
-       slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
-
-       if (!slave || !of_device_is_available(lcdc))
-               goto out;
-
-       i2c = of_parse_phandle(slave, "i2c", 0);
-       if (!i2c) {
-               pr_err("%s: Can't find i2c node trough phandle\n", __func__);
-               goto out;
-       }
-
-       overlay = tilcdc_get_overlay(&kft);
-       if (!overlay)
-               goto out;
-
-       encoder = of_find_matching_node(overlay, tilcdc_tda998x_of_match);
-       if (!encoder) {
-               pr_err("%s: Failed to find tda998x node\n", __func__);
-               goto out;
-       }
-
-       tilcdc_copy_props(slave, encoder, tilcdc_slave_props, &kft);
-
-       for_each_child_of_node(overlay, fragment) {
-               prop = of_find_property(fragment, "target-path", NULL);
-               if (!prop)
-                       continue;
-               if (!strncmp("i2c", (char *)prop->value, prop->length))
-                       if (tilcdc_prop_str_update(prop, i2c->full_name, &kft))
-                               goto out;
-               if (!strncmp("lcdc", (char *)prop->value, prop->length))
-                       if (tilcdc_prop_str_update(prop, lcdc->full_name, &kft))
-                               goto out;
-       }
-
-       tilcdc_node_disable(slave);
-
-       ovcs_id = 0;
-       ret = of_overlay_apply(overlay, &ovcs_id);
-       if (ret)
-               pr_err("%s: Applying overlay changeset failed: %d\n",
-                       __func__, ret);
-       else
-               pr_info("%s: ti,tilcdc,slave node successfully converted\n",
-                       __func__);
-out:
-       kfree_table_free(&kft);
-       of_node_put(i2c);
-       of_node_put(slave);
-       of_node_put(lcdc);
-       of_node_put(fragment);
-}
-
-static int __init tilcdc_slave_compat_init(void)
-{
-       tilcdc_convert_slave_node();
-       return 0;
-}
-
-subsys_initcall(tilcdc_slave_compat_init);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
deleted file mode 100644 (file)
index 693f8b0..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * DTS overlay for converting ti,tilcdc,slave binding to new binding.
- *
- * Copyright (C) 2015 Texas Instruments Inc.
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- */
-
-/*
- * target-path property values are simple tags that are replaced with
- * correct values in tildcdc_slave_compat.c. Some properties are also
- * copied over from the ti,tilcdc,slave node.
- */
-
-/dts-v1/;
-/ {
-       fragment@0 {
-               target-path = "i2c";
-               __overlay__ {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       tda19988 {
-                               compatible = "nxp,tda998x";
-                               reg = <0x70>;
-                               status = "okay";
-
-                               port {
-                                       hdmi_0: endpoint@0 {
-                                               remote-endpoint = <&lcd_0>;
-                                       };
-                               };
-                       };
-               };
-       };
-
-       fragment@1 {
-               target-path = "lcdc";
-               __overlay__ {
-                       port {
-                               lcd_0: endpoint@0 {
-                                       remote-endpoint = <&hdmi_0>;
-                               };
-                       };
-               };
-       };
-
-       __local_fixups__ {
-               fragment@0 {
-                       __overlay__ {
-                               tda19988 {
-                                       port {
-                                               endpoint@0 {
-                                                       remote-endpoint = <0>;
-                                               };
-                                       };
-                               };
-                       };
-               };
-               fragment@1 {
-                       __overlay__ {
-                               port {
-                                       endpoint@0 {
-                                               remote-endpoint = <0>;
-                                       };
-                               };
-                       };
-               };
-       };
-};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
deleted file mode 100644 (file)
index 403d35d..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-/* This header declares the symbols defined in tilcdc_slave_compat.dts */
-
-#ifndef __TILCDC_SLAVE_COMPAT_H__
-#define __TILCDC_SLAVE_COMPAT_H__
-
-extern uint8_t __dtb_tilcdc_slave_compat_begin[];
-extern uint8_t __dtb_tilcdc_slave_compat_end[];
-
-#endif /* __TILCDC_SLAVE_COMPAT_H__ */
index 316f831ad5f044d99be8bcfc40db59da8e3fdc7a..b0551aa677b82fcb3ca5e26c72e6d612bee9447a 100644 (file)
@@ -744,12 +744,14 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                        }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               if (p++ != pages[i + j])
-                                   break;
+                       if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+                               for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                       if (p++ != pages[i + j])
+                                           break;
 
-                       if (j == HPAGE_PMD_NR)
-                               order = HPAGE_PMD_ORDER;
+                               if (j == HPAGE_PMD_NR)
+                                       order = HPAGE_PMD_ORDER;
+                       }
 #endif
 
                        if (page_count(pages[i]) != 1)
@@ -865,20 +867,22 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 
                i = 0;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               while (npages >= HPAGE_PMD_NR) {
-                       gfp_t huge_flags = gfp_flags;
+               if (!(gfp_flags & GFP_DMA32)) {
+                       while (npages >= HPAGE_PMD_NR) {
+                               gfp_t huge_flags = gfp_flags;
 
-                       huge_flags |= GFP_TRANSHUGE;
-                       huge_flags &= ~__GFP_MOVABLE;
-                       huge_flags &= ~__GFP_COMP;
-                       p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
-                       if (!p)
-                               break;
+                               huge_flags |= GFP_TRANSHUGE;
+                               huge_flags &= ~__GFP_MOVABLE;
+                               huge_flags &= ~__GFP_COMP;
+                               p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+                               if (!p)
+                                       break;
 
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               pages[i++] = p++;
+                               for (j = 0; j < HPAGE_PMD_NR; ++j)
+                                       pages[i++] = p++;
 
-                       npages -= HPAGE_PMD_NR;
+                               npages -= HPAGE_PMD_NR;
+                       }
                }
 #endif
 
index 98a6cb9f44fc84fc8d66b8f2b6e35b72745e8363..4ae45d7dac428984fa63081338ac23f283fc5ac8 100644 (file)
@@ -674,10 +674,9 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo)
        mutex_unlock(&bo->madv_lock);
 }
 
-static void vc4_bo_cache_time_timer(unsigned long data)
+static void vc4_bo_cache_time_timer(struct timer_list *t)
 {
-       struct drm_device *dev = (struct drm_device *)data;
-       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
 
        schedule_work(&vc4->bo_cache.time_work);
 }
@@ -1039,9 +1038,7 @@ int vc4_bo_cache_init(struct drm_device *dev)
        INIT_LIST_HEAD(&vc4->bo_cache.time_list);
 
        INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
-       setup_timer(&vc4->bo_cache.time_timer,
-                   vc4_bo_cache_time_timer,
-                   (unsigned long)dev);
+       timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
 
        return 0;
 }
index e00ac2f3a264b362e6deddff1ffb65714e30ae97..6c32c89a83a96687d2a817522288ad205ffd5c63 100644 (file)
@@ -312,10 +312,10 @@ vc4_reset_work(struct work_struct *work)
 }
 
 static void
-vc4_hangcheck_elapsed(unsigned long data)
+vc4_hangcheck_elapsed(struct timer_list *t)
 {
-       struct drm_device *dev = (struct drm_device *)data;
-       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
+       struct drm_device *dev = vc4->dev;
        uint32_t ct0ca, ct1ca;
        unsigned long irqflags;
        struct vc4_exec_info *bin_exec, *render_exec;
@@ -1154,9 +1154,7 @@ vc4_gem_init(struct drm_device *dev)
        spin_lock_init(&vc4->job_lock);
 
        INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
-       setup_timer(&vc4->hangcheck.timer,
-                   vc4_hangcheck_elapsed,
-                   (unsigned long)dev);
+       timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
 
        INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
 
index fa37a1c07cf695900b0cb5b681f727169af51a62..0b2088264039131f05c2f09bc9ba84d2d3793e39 100644 (file)
@@ -424,7 +424,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
                                           vc4_encoder->limited_rgb_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL,
-                                          vc4_encoder->rgb_range_selectable);
+                                          vc4_encoder->rgb_range_selectable,
+                                          false);
 
        vc4_hdmi_write_infoframe(encoder, &frame);
 }
index 7d7af3a93d941bb9552afc1140c408c8da86be37..61b2e5377993dc319cf6b362b66f0c3db9a768cd 100644 (file)
@@ -208,6 +208,9 @@ vc4_irq_postinstall(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
+       /* Undo the effects of a previous vc4_irq_uninstall. */
+       enable_irq(dev->irq);
+
        /* Enable both the render done and out of memory interrupts. */
        V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
 
@@ -225,6 +228,9 @@ vc4_irq_uninstall(struct drm_device *dev)
        /* Clear any pending interrupts we might have left. */
        V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
 
+       /* Finish any interrupt handler still in flight. */
+       disable_irq(dev->irq);
+
        cancel_work_sync(&vc4->overflow_mem_work);
 }
 
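The two vc4_irq hunks keep install and uninstall balanced: disable_irq() does not return until any handler already executing has finished, so the teardown below it runs with the line quiet, and the matching enable_irq() in postinstall undoes that extra disable depth on the next install. A minimal sketch of the quiesce idiom, assuming an already requested irq line:

#include <linux/interrupt.h>

static void example_quiesce_and_rearm(unsigned int irq)
{
	/* waits for an in-flight handler to complete before returning */
	disable_irq(irq);

	/* ... reset hardware and per-device state with the line masked ... */

	/* depth-counted: pairs with the disable_irq() above */
	enable_irq(irq);
}
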
index 8fd52f211e9d9623d0225eb0b549aeac7dd31bca..b28876c222b46c6f9b314354d29fb3fd406a979b 100644 (file)
@@ -85,9 +85,9 @@ static const struct dma_fence_ops vgem_fence_ops = {
        .timeline_value_str = vgem_fence_timeline_value_str,
 };
 
-static void vgem_fence_timeout(unsigned long data)
+static void vgem_fence_timeout(struct timer_list *t)
 {
-       struct vgem_fence *fence = (struct vgem_fence *)data;
+       struct vgem_fence *fence = from_timer(fence, t, timer);
 
        dma_fence_signal(&fence->base);
 }
@@ -105,7 +105,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
        dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
                       dma_fence_context_alloc(1), 1);
 
-       setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+       timer_setup(&fence->timer, vgem_fence_timeout, 0);
 
        /* We force the fence to expire within 10s to prevent driver hangs */
        mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
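
from_timer() in these handlers is just container_of(): it subtracts the member offset from the member pointer to get back to the enclosing structure. A plain userspace C rendition of that arithmetic, with made-up names, shows the mechanics:

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fence_like {
	int seqno;
	long timer;		/* stands in for an embedded struct timer_list */
};

int main(void)
{
	struct fence_like f = { .seqno = 42 };
	long *t = &f.timer;	/* what the timer core would hand the callback */
	struct fence_like *owner = container_of(t, struct fence_like, timer);

	printf("seqno=%d\n", owner->seqno);	/* prints 42 */
	return 0;
}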
index 32c9938e1e1eae51923e5953284d33632ab36581..d6e84a589ef1161241950cae10ffe8f0b8323499 100644 (file)
@@ -452,9 +452,9 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
 
 
 static void
-via_dmablit_timer(unsigned long data)
+via_dmablit_timer(struct timer_list *t)
 {
-       drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+       drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
        struct drm_device *dev = blitq->dev;
        int engine = (int)
                (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
@@ -559,8 +559,7 @@ via_init_dmablit(struct drm_device *dev)
                        init_waitqueue_head(blitq->blit_queue + j);
                init_waitqueue_head(&blitq->busy_queue);
                INIT_WORK(&blitq->wq, via_dmablit_workqueue);
-               setup_timer(&blitq->poll_timer, via_dmablit_timer,
-                               (unsigned long)blitq);
+               timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
        }
 }
 
index 7a4b8362dda8f4c2691575550b383455c2aadfdb..49bfe6e7d0052937b3b2189420350c226985f7ba 100644 (file)
@@ -249,11 +249,8 @@ EXPORT_SYMBOL_GPL(ipu_dc_enable);
 
 void ipu_dc_enable_channel(struct ipu_dc *dc)
 {
-       int di;
        u32 reg;
 
-       di = dc->di;
-
        reg = readl(dc->base + DC_WR_CH_CONF);
        reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
        writel(reg, dc->base + DC_WR_CH_CONF);
index 07cbc70f00e7dfc8ddbfc7554f8e9c134721bc1f..eae7d52cf1a824acb32ad435f55324011aec28c1 100644 (file)
@@ -173,9 +173,9 @@ static void battery_flat(struct appleir *appleir)
        dev_err(&appleir->input_dev->dev, "possible flat battery?\n");
 }
 
-static void key_up_tick(unsigned long data)
+static void key_up_tick(struct timer_list *t)
 {
-       struct appleir *appleir = (struct appleir *)data;
+       struct appleir *appleir = from_timer(appleir, t, key_up_timer);
        struct hid_device *hid = appleir->hid;
        unsigned long flags;
 
@@ -303,8 +303,7 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
        hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
 
        spin_lock_init(&appleir->lock);
-       setup_timer(&appleir->key_up_timer,
-                   key_up_tick, (unsigned long) appleir);
+       timer_setup(&appleir->key_up_timer, key_up_tick, 0);
 
        hid_set_drvdata(hid, appleir);
 
index 49c4bd34b3c508259fae68e5714d17d7b0a83b70..87eda34ea2f86aa2abb26ac40793ad2e9df40f56 100644 (file)
@@ -239,9 +239,9 @@ drop_note:
        return;
 }
 
-static void pcmidi_sustained_note_release(unsigned long data)
+static void pcmidi_sustained_note_release(struct timer_list *t)
 {
-       struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+       struct pcmidi_sustain *pms = from_timer(pms, t, timer);
 
        pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
        pms->in_use = 0;
@@ -256,8 +256,7 @@ static void init_sustain_timers(struct pcmidi_snd *pm)
                pms = &pm->sustained_notes[i];
                pms->in_use = 0;
                pms->pm = pm;
-               setup_timer(&pms->timer, pcmidi_sustained_note_release,
-                       (unsigned long)pms);
+               timer_setup(&pms->timer, pcmidi_sustained_note_release, 0);
        }
 }
 
index d00391418d1ae307ad1ceead4ca39e289a04dfac..579884ebd94db1eb685a1e9477a4dca5a2dcf674 100644 (file)
@@ -1226,9 +1226,9 @@ static void wiimote_schedule(struct wiimote_data *wdata)
        spin_unlock_irqrestore(&wdata->state.lock, flags);
 }
 
-static void wiimote_init_timeout(unsigned long arg)
+static void wiimote_init_timeout(struct timer_list *t)
 {
-       struct wiimote_data *wdata = (void*)arg;
+       struct wiimote_data *wdata = from_timer(wdata, t, timer);
 
        wiimote_schedule(wdata);
 }
@@ -1740,7 +1740,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
        wdata->state.cmd_battery = 0xff;
 
        INIT_WORK(&wdata->init_worker, wiimote_init_worker);
-       setup_timer(&wdata->timer, wiimote_init_timeout, (long)wdata);
+       timer_setup(&wdata->timer, wiimote_init_timeout, 0);
 
        return wdata;
 }
index 46a54ed234105040ecda4644cadeb48d6639aca2..0721e175664aee9fece8a8546c6f41936a7febde 100644 (file)
@@ -81,7 +81,7 @@ struct tctl_offset {
 };
 
 static const struct tctl_offset tctl_offset_table[] = {
-       { 0x17, "AMD Ryzen 7 1600X", 20000 },
+       { 0x17, "AMD Ryzen 5 1600X", 20000 },
        { 0x17, "AMD Ryzen 7 1700X", 20000 },
        { 0x17, "AMD Ryzen 7 1800X", 20000 },
        { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
index 246fb2365126b5b013c2df0b09ef54841e91a4a9..2b0f182daa8752fd0e7b54f91c4ac53d13a2ccba 100644 (file)
@@ -1246,10 +1246,8 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
  exit_remove_files:
        w83781d_remove_files(dev);
-       if (data->lm75[0])
-               i2c_unregister_device(data->lm75[0]);
-       if (data->lm75[1])
-               i2c_unregister_device(data->lm75[1]);
+       i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[1]);
        return err;
 }
 
@@ -1262,10 +1260,8 @@ w83781d_remove(struct i2c_client *client)
        hwmon_device_unregister(data->hwmon_dev);
        w83781d_remove_files(dev);
 
-       if (data->lm75[0])
-               i2c_unregister_device(data->lm75[0]);
-       if (data->lm75[1])
-               i2c_unregister_device(data->lm75[1]);
+       i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[1]);
 
        return 0;
 }
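
The hwmon hunks here and below can drop their NULL checks because i2c_unregister_device(), like kfree(), treats a NULL argument as a no-op, so error and remove paths may unregister unconditionally. A plain-C sketch of the same NULL-tolerant teardown idiom (names invented):

#include <stdlib.h>

struct sub_client {
	char *name;
};

/* NULL-tolerant destructor: callers never need an "if (c)" guard */
static void sub_client_destroy(struct sub_client *c)
{
	if (!c)
		return;
	free(c->name);
	free(c);
}

int main(void)
{
	struct sub_client *a = NULL;
	struct sub_client *b = calloc(1, sizeof(*b));

	sub_client_destroy(a);	/* safe no-op, mirrors i2c_unregister_device(NULL) */
	sub_client_destroy(b);
	return 0;
}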
index 8af6081b4ab418c16008ac8c958a2aee49b45f8e..28fa3bd2c0961449e4edd14cc30f3ba2dc8fc007 100644 (file)
@@ -1316,8 +1316,7 @@ static int w83791d_detect_subclients(struct i2c_client *client)
 /* Undo inits in case of errors */
 
 error_sc_1:
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[0]);
 error_sc_0:
        return err;
 }
@@ -1434,10 +1433,8 @@ error5:
 error4:
        sysfs_remove_group(&client->dev.kobj, &w83791d_group);
 error3:
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
-       if (data->lm75[1] != NULL)
-               i2c_unregister_device(data->lm75[1]);
+       i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[1]);
        return err;
 }
 
@@ -1448,10 +1445,8 @@ static int w83791d_remove(struct i2c_client *client)
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&client->dev.kobj, &w83791d_group);
 
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
-       if (data->lm75[1] != NULL)
-               i2c_unregister_device(data->lm75[1]);
+       i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[1]);
 
        return 0;
 }
index d764602d70dbef20b251ac0e74281afd934e52ec..76aa39e537e0306930bd207f1ef53540458a08fc 100644 (file)
@@ -981,8 +981,7 @@ w83792d_detect_subclients(struct i2c_client *new_client)
 /* Undo inits in case of errors */
 
 ERROR_SC_1:
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[0]);
 ERROR_SC_0:
        return err;
 }
@@ -1456,10 +1455,8 @@ exit_remove_files:
        for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
                sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
 exit_i2c_unregister:
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
-       if (data->lm75[1] != NULL)
-               i2c_unregister_device(data->lm75[1]);
+       i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[1]);
        return err;
 }
 
@@ -1475,10 +1472,8 @@ w83792d_remove(struct i2c_client *client)
                sysfs_remove_group(&client->dev.kobj,
                                   &w83792d_group_fan[i]);
 
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
-       if (data->lm75[1] != NULL)
-               i2c_unregister_device(data->lm75[1]);
+       i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[1]);
 
        return 0;
 }
index 5ba9d9f1daa1f5f65933edd58bc32adb8525e10b..0af0f6283b353f36a9344a189660426642ef17fa 100644 (file)
@@ -1564,10 +1564,8 @@ static int w83793_remove(struct i2c_client *client)
        for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
                device_remove_file(dev, &w83793_temp[i].dev_attr);
 
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
-       if (data->lm75[1] != NULL)
-               i2c_unregister_device(data->lm75[1]);
+       i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[1]);
 
        /* Decrease data reference counter */
        mutex_lock(&watchdog_data_mutex);
@@ -1625,8 +1623,7 @@ w83793_detect_subclients(struct i2c_client *client)
        /* Undo inits in case of errors */
 
 ERROR_SC_1:
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[0]);
 ERROR_SC_0:
        return err;
 }
@@ -1962,10 +1959,8 @@ exit_remove:
        for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
                device_remove_file(dev, &w83793_temp[i].dev_attr);
 
-       if (data->lm75[0] != NULL)
-               i2c_unregister_device(data->lm75[0]);
-       if (data->lm75[1] != NULL)
-               i2c_unregister_device(data->lm75[1]);
+       i2c_unregister_device(data->lm75[0]);
+       i2c_unregister_device(data->lm75[1]);
 free_mem:
        kfree(data);
 exit:
index 6e0a5539a9eac625995d06d343d34f675d558db4..f0f467983960d6b8e23818aac18d235d1988a089 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 menuconfig HWSPINLOCK
-       tristate "Hardware Spinlock drivers"
+       bool "Hardware Spinlock drivers"
 
 config HWSPINLOCK_OMAP
        tristate "OMAP Hardware Spinlock device"
index f5f2b62471da919e4ffd859b8a96ae738d38cea8..859ddab9448fe690a16f2c2d6fad9e243a3ea1ee 100644 (file)
@@ -22,7 +22,7 @@
 #define DRV_NAME "ide-pnp"
 
 /* Add your devices here :)) */
-static struct pnp_device_id idepnp_devices[] = {
+static const struct pnp_device_id idepnp_devices[] = {
        /* Generic ESDI/IDE/ATA compatible hard disk controller */
        {.id = "PNP0600", .driver_data = 0},
        {.id = ""}
index ea7adb638d99a71c31366ee89361fd1618e76ed6..2ba2ff5e59c47c125af52e86c32badba3d633249 100644 (file)
@@ -175,9 +175,9 @@ static void ssp_wdt_work_func(struct work_struct *work)
        data->timeout_cnt = 0;
 }
 
-static void ssp_wdt_timer_func(unsigned long ptr)
+static void ssp_wdt_timer_func(struct timer_list *t)
 {
-       struct ssp_data *data = (struct ssp_data *)ptr;
+       struct ssp_data *data = from_timer(data, t, wdt_timer);
 
        switch (data->fw_dl_state) {
        case SSP_FW_DL_STATE_FAIL:
@@ -571,7 +571,7 @@ static int ssp_probe(struct spi_device *spi)
        INIT_WORK(&data->work_wdt, ssp_wdt_work_func);
        INIT_DELAYED_WORK(&data->work_refresh, ssp_refresh_task);
 
-       setup_timer(&data->wdt_timer, ssp_wdt_timer_func, (unsigned long)data);
+       timer_setup(&data->wdt_timer, ssp_wdt_timer_func, 0);
 
        ret = request_threaded_irq(data->spi->irq, NULL,
                                   ssp_irq_thread_fn,
index 21e60b1e2ff41b1c27e98ebad68e5f4b0ccb7f42..130606c3b07c15f03e5481b1cf22831a7c9a8e85 100644 (file)
@@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        sg_list_start = umem->sg_head.sgl;
 
        while (npages) {
-               ret = get_user_pages(cur_base,
+               ret = get_user_pages_longterm(cur_base,
                                     min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     gup_flags, page_list, vma_list);
index 9beee9cef137719f4cdc9f092a4a6f982adfcec2..ee0ee1f9994b4fae933d8590b72ba138d29e176d 100644 (file)
@@ -642,9 +642,9 @@ err:
        return -ENOMEM;
 }
 
-static void delay_time_func(unsigned long ctx)
+static void delay_time_func(struct timer_list *t)
 {
-       struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+       struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
 
        dev->fill_delay = 0;
 }
@@ -663,7 +663,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
                return -ENOMEM;
        }
 
-       setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
+       timer_setup(&dev->delay_timer, delay_time_func, 0);
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                INIT_LIST_HEAD(&ent->head);
index f6474c24f193c4edb379dd37847093b7fb217321..ffb98eaaf1c2036e1bdea340a1c3c679bf1062e5 100644 (file)
@@ -130,9 +130,9 @@ static void handle_catas(struct mthca_dev *dev)
        spin_unlock_irqrestore(&catas_lock, flags);
 }
 
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
 {
-       struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
+       struct mthca_dev *dev = from_timer(dev, t, catas_err.timer);
        int i;
 
        for (i = 0; i < dev->catas_err.size; ++i)
@@ -149,7 +149,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
 {
        phys_addr_t addr;
 
-       init_timer(&dev->catas_err.timer);
+       timer_setup(&dev->catas_err.timer, poll_catas, 0);
        dev->catas_err.map  = NULL;
 
        addr = pci_resource_start(dev->pdev, 0) +
@@ -164,8 +164,6 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
                return;
        }
 
-       dev->catas_err.timer.data     = (unsigned long) dev;
-       dev->catas_err.timer.function = poll_catas;
        dev->catas_err.timer.expires  = jiffies + MTHCA_CATAS_POLL_INTERVAL;
        INIT_LIST_HEAD(&dev->catas_err.list);
        add_timer(&dev->catas_err.timer);
index db46b7b53fb4f94a8727864120106fa1e01bbf52..162475aeeedd7ffd30ca0478e9bcbe0c16204e54 100644 (file)
@@ -3819,7 +3819,7 @@ void  nes_port_ibevent(struct nes_vnic *nesvnic)
        if (!nesvnic->event_timer.function) {
                ib_dispatch_event(&event);
                nesvnic->last_dispatched_event = event.event;
-               nesvnic->event_timer.function = (TIMER_FUNC_TYPE)nes_handle_delayed_event;
+               nesvnic->event_timer.function = nes_handle_delayed_event;
                nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
                add_timer(&nesvnic->event_timer);
        } else {
index cedc665364cd6e872ee8a86795b0de19913b8a28..73862a836062ed48b16def4f8ca31930a2227d26 100644 (file)
@@ -202,9 +202,9 @@ void gameport_stop_polling(struct gameport *gameport)
 }
 EXPORT_SYMBOL(gameport_stop_polling);
 
-static void gameport_run_poll_handler(unsigned long d)
+static void gameport_run_poll_handler(struct timer_list *t)
 {
-       struct gameport *gameport = (struct gameport *)d;
+       struct gameport *gameport = from_timer(gameport, t, poll_timer);
 
        gameport->poll_handler(gameport);
        if (gameport->poll_cnt)
@@ -542,8 +542,7 @@ static void gameport_init_port(struct gameport *gameport)
 
        INIT_LIST_HEAD(&gameport->node);
        spin_lock_init(&gameport->timer_lock);
-       setup_timer(&gameport->poll_timer, gameport_run_poll_handler,
-                   (unsigned long)gameport);
+       timer_setup(&gameport->poll_timer, gameport_run_poll_handler, 0);
 }
 
 /*
index 44916ef4a424391199b9738a2e499440cb4ad357..e30642db50d5208cc7f4fe6e3868f6a8a596321b 100644 (file)
@@ -2047,7 +2047,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
  */
 void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
 {
-       dev->timer.function = (TIMER_FUNC_TYPE)input_repeat_key;
+       dev->timer.function = input_repeat_key;
        dev->rep[REP_DELAY] = delay;
        dev->rep[REP_PERIOD] = period;
 }
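
With the conversion, input_repeat_key() (and nes_handle_delayed_event() earlier) already have the void (*)(struct timer_list *) prototype that timer_list.function expects, so the transitional TIMER_FUNC_TYPE cast can simply go away. A short sketch with a hypothetical callback:

#include <linux/timer.h>

struct example_dev {
	struct timer_list timer;
};

static void example_repeat(struct timer_list *t)
{
	/* same prototype as timer_list.function, so no cast is required */
}

static void example_enable_repeat(struct example_dev *dev)
{
	dev->timer.function = example_repeat;
}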
index f4ad83eab67f66c7a6ebfa956976e02668e9e32a..de0dd4756c8408c7ee877c68b74a64e109bf5a39 100644 (file)
@@ -364,9 +364,9 @@ static int db9_saturn(int mode, struct parport *port, struct input_dev *devs[])
        return 0;
 }
 
-static void db9_timer(unsigned long private)
+static void db9_timer(struct timer_list *t)
 {
-       struct db9 *db9 = (void *) private;
+       struct db9 *db9 = from_timer(db9, t, timer);
        struct parport *port = db9->pd->port;
        struct input_dev *dev = db9->dev[0];
        struct input_dev *dev2 = db9->dev[1];
@@ -609,7 +609,7 @@ static void db9_attach(struct parport *pp)
        db9->pd = pd;
        db9->mode = mode;
        db9->parportno = pp->number;
-       setup_timer(&db9->timer, db9_timer, (long)db9);
+       timer_setup(&db9->timer, db9_timer, 0);
 
        for (i = 0; i < (min(db9_mode->n_pads, DB9_MAX_DEVICES)); i++) {
 
index ca734ea97e53e9fed5dca03f53a59e20607fe569..2ffb2e8bdc3bf456692754a7e6a625e2b572d671 100644 (file)
@@ -743,9 +743,9 @@ static void gc_psx_process_packet(struct gc *gc)
  * gc_timer() initiates reads of console pads data.
  */
 
-static void gc_timer(unsigned long private)
+static void gc_timer(struct timer_list *t)
 {
-       struct gc *gc = (void *) private;
+       struct gc *gc = from_timer(gc, t, timer);
 
 /*
  * N64 pads - must be read first, any read confuses them for 200 us
@@ -974,7 +974,7 @@ static void gc_attach(struct parport *pp)
        mutex_init(&gc->mutex);
        gc->pd = pd;
        gc->parportno = pp->number;
-       setup_timer(&gc->timer, gc_timer, (long) gc);
+       timer_setup(&gc->timer, gc_timer, 0);
 
        for (i = 0; i < n_pads && i < GC_MAX_DEVICES; i++) {
                if (!pads[i])
index a1fdc75a438d10f2d5a7cd9a0edcc75b2403190c..e2685753e460e252a6645ccc479f3f4b2cd4ec02 100644 (file)
@@ -89,9 +89,9 @@ static struct tgfx {
  * tgfx_timer() reads and analyzes TurboGraFX joystick data.
  */
 
-static void tgfx_timer(unsigned long private)
+static void tgfx_timer(struct timer_list *t)
 {
-       struct tgfx *tgfx = (void *) private;
+       struct tgfx *tgfx = from_timer(tgfx, t, timer);
        struct input_dev *dev;
        int data1, data2, i;
 
@@ -200,7 +200,7 @@ static void tgfx_attach(struct parport *pp)
        mutex_init(&tgfx->sem);
        tgfx->pd = pd;
        tgfx->parportno = pp->number;
-       setup_timer(&tgfx->timer, tgfx_timer, (long)tgfx);
+       timer_setup(&tgfx->timer, tgfx_timer, 0);
 
        for (i = 0; i < n_devs; i++) {
                if (n_buttons[i] < 1)
index d3265b6b58b8ebc06b69e8a40983d5303b938054..1173890f6719ca5c801276f3585d0e9ab6b67e0c 100644 (file)
@@ -102,7 +102,7 @@ static inline bool get_down(unsigned long data0, unsigned long data1)
                !(data1 & S3C2410_ADCDAT0_UPDOWN));
 }
 
-static void touch_timer_fire(unsigned long data)
+static void touch_timer_fire(struct timer_list *unused)
 {
        unsigned long data0;
        unsigned long data1;
index 466aaa8ba841c3253226543adfecc030b4e5d6f6..83fe2621effe72bc1cbeecd80df4030235d87328 100644 (file)
@@ -36,7 +36,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(unsigned long data);
+static void fq_flush_timeout(struct timer_list *t);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -107,7 +107,7 @@ int init_iova_flush_queue(struct iova_domain *iovad,
                spin_lock_init(&fq->lock);
        }
 
-       setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+       timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
        atomic_set(&iovad->fq_timer_on, 0);
 
        return 0;
@@ -519,9 +519,9 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
        }
 }
 
-static void fq_flush_timeout(unsigned long data)
+static void fq_flush_timeout(struct timer_list *t)
 {
-       struct iova_domain *iovad = (struct iova_domain *)data;
+       struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
        int cpu;
 
        atomic_set(&iovad->fq_timer_on, 0);
index 53380bd72ea4068ec394a60e371b093727a6e543..c70476b34a53f1c76f9bb470ac2a34269acfbf24 100644 (file)
@@ -41,8 +41,15 @@ config ARM_GIC_V3
 
 config ARM_GIC_V3_ITS
        bool
+       select GENERIC_MSI_IRQ_DOMAIN
+       default ARM_GIC_V3
+
+config ARM_GIC_V3_ITS_PCI
+       bool
+       depends on ARM_GIC_V3_ITS
        depends on PCI
        depends on PCI_MSI
+       default ARM_GIC_V3_ITS
 
 config ARM_NVIC
        bool
index dae7282bfdef31fc0ce7475854cdff995dbcae93..d2df34a54d38b1aa00fc6cdc71bf754b4ccecace 100644 (file)
@@ -30,7 +30,8 @@ obj-$(CONFIG_ARM_GIC_PM)              += irq-gic-pm.o
 obj-$(CONFIG_ARCH_REALVIEW)            += irq-gic-realview.o
 obj-$(CONFIG_ARM_GIC_V2M)              += irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)               += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS)           += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS)           += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_PCI)       += irq-gic-v3-its-pci-msi.o
 obj-$(CONFIG_PARTITION_PERCPU)         += irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)     += irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)                 += irq-nvic.o
index 17221143f5057ce35f84f6021bf972f32b61bc48..b56c3e23f0af921142ded1e8bd5438e449bd7cc9 100644 (file)
@@ -1103,18 +1103,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
        int nr_parts;
        struct partition_affinity *parts;
 
-       parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+       parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
        if (!parts_node)
                return;
 
        nr_parts = of_get_child_count(parts_node);
 
        if (!nr_parts)
-               return;
+               goto out_put_node;
 
        parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
        if (WARN_ON(!parts))
-               return;
+               goto out_put_node;
 
        for_each_child_of_node(parts_node, child_part) {
                struct partition_affinity *part;
@@ -1181,6 +1181,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
 
                gic_data.ppi_descs[i] = desc;
        }
+
+out_put_node:
+       of_node_put(parts_node);
 }
 
 static void __init gic_of_setup_kvm_info(struct device_node *node)
@@ -1523,7 +1526,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
 
        err = gic_validate_dist_version(acpi_data.dist_base);
        if (err) {
-               pr_err("No distributor detected at @%p, giving up",
+               pr_err("No distributor detected at @%p, giving up\n",
                       acpi_data.dist_base);
                goto out_dist_unmap;
        }
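
The ppi-partitions fix pairs of_get_child_by_name(), which returns the child with its reference count raised, with an of_node_put() on every exit path instead of returning early and leaking the reference. The usual shape of that pattern, sketched with placeholder names:

#include <linux/errno.h>
#include <linux/of.h>

static int example_parse_partitions(struct device_node *parent)
{
	struct device_node *child;
	int ret = 0;

	child = of_get_child_by_name(parent, "ppi-partitions");
	if (!child)
		return 0;

	if (!of_get_child_count(child)) {
		ret = -ENODEV;		/* illustrative error */
		goto out_put;
	}

	/* ... walk and parse the sub-nodes ... */

out_put:
	of_node_put(child);		/* balances the reference taken above */
	return ret;
}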
index cd0bcc3b7e33709a472c1952c5ee7ccdf1ea0382..dba9d67cb9c138856aec6d5b6917408bf7e87355 100644 (file)
@@ -177,6 +177,7 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
                        .map      = map,
                },
        };
+       int ret;
 
        /*
         * The host will never see that interrupt firing again, so it
@@ -184,7 +185,11 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
         */
        irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 
-       return irq_set_vcpu_affinity(irq, &info);
+       ret = irq_set_vcpu_affinity(irq, &info);
+       if (ret)
+               irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+       return ret;
 }
 
 int its_get_vlpi(int irq, struct its_vlpi_map *map)
index 1f59998e03f806e72ad48434885192ffacf98c70..e80263e16c4c8fe37f7fca956523d1a810567931 100644 (file)
@@ -325,7 +325,7 @@ static int pdc_intc_probe(struct platform_device *pdev)
 
        /* Ioremap the registers */
        priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
-                                     res_regs->end - res_regs->start);
+                                     resource_size(res_regs));
        if (!priv->pdc_base)
                return -EIO;
 
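The pdc-intc fix replaces the open-coded end - start, which is one byte short because resource ranges are inclusive of both endpoints; resource_size() from <linux/ioport.h> accounts for that. Its definition is essentially:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* inclusive range */
}

For a register window spanning 0x1000..0x1fff this yields 0x1000 bytes, where end - start would map only 0xfff.
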
index c25ce5af091addb2f23653feb8fcbac27d5c9180..ec0e6a8cdb7558433d933868520d8d72e7b28b2b 100644 (file)
@@ -156,7 +156,7 @@ static int s3c_irq_type(struct irq_data *data, unsigned int type)
                irq_set_handler(data->irq, handle_level_irq);
                break;
        default:
-               pr_err("No such irq type %d", type);
+               pr_err("No such irq type %d\n", type);
                return -EINVAL;
        }
 
@@ -204,7 +204,7 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
                        break;
 
                default:
-                       pr_err("No such irq type %d", type);
+                       pr_err("No such irq type %d\n", type);
                        return -EINVAL;
        }
 
index 1b6e2f7c59af67ba5e3bdbf1d2387e85ccee1d56..1927b2f36ff6e5e760fce6bd7b5776c51ff9179b 100644 (file)
@@ -196,8 +196,8 @@ static int __init exiu_init(struct device_node *node,
        }
 
        data->base = of_iomap(node, 0);
-       if (IS_ERR(data->base)) {
-               err = PTR_ERR(data->base);
+       if (!data->base) {
+               err = -ENODEV;
                goto out_free;
        }
 
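The exiu change reflects that of_iomap() reports failure by returning NULL rather than an ERR_PTR(), so IS_ERR()/PTR_ERR() could never catch the error. The correct check is a plain NULL test, roughly:

#include <linux/of_address.h>

static void __iomem *example_map_first_reg(struct device_node *node)
{
	void __iomem *base = of_iomap(node, 0);

	if (!base)	/* NULL, not ERR_PTR, signals failure here */
		return NULL;

	return base;
}
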
index 6aa3ea4792148d057b9b5d766acd0a7dd5813979..f31265937439608314bf55d70665b55a299a4666 100644 (file)
@@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev)
 {
        struct combiner *combiner;
        size_t alloc_sz;
-       u32 nregs;
+       int nregs;
        int err;
 
        nregs = count_registers(pdev);
index 89dd1303a98a13925c90a09d2089a2870c3ea928..49fef08858c5370e9a0ed67872a8f9ff9aca6cff 100644 (file)
@@ -2235,9 +2235,9 @@ static void send_listen(capidrv_contr *card)
        send_message(card, &cmdcmsg);
 }
 
-static void listentimerfunc(unsigned long x)
+static void listentimerfunc(struct timer_list *t)
 {
-       capidrv_contr *card = (capidrv_contr *)x;
+       capidrv_contr *card = from_timer(card, t, listentimer);
        if (card->state != ST_LISTEN_NONE && card->state != ST_LISTEN_ACTIVE)
                printk(KERN_ERR "%s: controller dead ??\n", card->name);
        send_listen(card);
@@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
                return -1;
        }
        card->owner = THIS_MODULE;
-       setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
+       timer_setup(&card->listentimer, listentimerfunc, 0);
        strcpy(card->name, id);
        card->contrnr = contr;
        card->nbchan = profp->nbchannel;
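
This capidrv hunk is the first of many conversions in this merge from the old setup_timer()/unsigned-long-cookie style to the timer_setup() API: the callback now receives the struct timer_list pointer itself and recovers its containing structure with from_timer(), so no cast through an unsigned long is needed. A generic sketch of the pattern with made-up names (my_dev, my_timer_fn and the one-second period are illustrative, not taken from any driver touched here):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_dev {
        /* ... driver state ... */
        struct timer_list timer;
};

static void my_timer_fn(struct timer_list *t)
{
        /* recover the enclosing structure from its timer_list member */
        struct my_dev *dev = from_timer(dev, t, timer);

        /* ... periodic work on dev ... */
}

static void my_dev_start(struct my_dev *dev)
{
        timer_setup(&dev->timer, my_timer_fn, 0);
        mod_timer(&dev->timer, jiffies + HZ);
}

The same transformation repeats in the ISDN, pblk/rrpc, media, rtsx_usb and mmc retune hunks that follow and is not annotated again.
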
index 6f423bc49d0dcfa0a0bd4ef9321213dbd14de07e..5620fd2c6009dfb4e24664e17fad9ca2db3079d5 100644 (file)
@@ -55,10 +55,10 @@ DEFINE_SPINLOCK(divert_lock);
 /***************************/
 /* timer callback function */
 /***************************/
-static void deflect_timer_expire(ulong arg)
+static void deflect_timer_expire(struct timer_list *t)
 {
        unsigned long flags;
-       struct call_struc *cs = (struct call_struc *) arg;
+       struct call_struc *cs = from_timer(cs, t, timer);
 
        spin_lock_irqsave(&divert_lock, flags);
        del_timer(&cs->timer); /* delete active timer */
@@ -157,7 +157,7 @@ int cf_command(int drvid, int mode,
        /* allocate mem for information struct */
        if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
                return (-ENOMEM); /* no memory */
-       setup_timer(&cs->timer, deflect_timer_expire, (ulong)cs);
+       timer_setup(&cs->timer, deflect_timer_expire, 0);
        cs->info[0] = '\0';
        cs->ics.driver = drvid;
        cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
@@ -450,8 +450,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
                                        return (0); /* no external deflection needed */
                        if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
                                return (0); /* no memory */
-                       setup_timer(&cs->timer, deflect_timer_expire,
-                                   (ulong)cs);
+                       timer_setup(&cs->timer, deflect_timer_expire, 0);
                        cs->info[0] = '\0';
 
                        cs->ics = *ic; /* copy incoming data */
index c61049585cbd7b67f24e057238244b8938500a9e..0033d74a72917e18dd58be87fa99f312ad7b9a70 100644 (file)
@@ -78,7 +78,7 @@ static unsigned int um_idi_poll(struct file *file, poll_table *wait);
 static int um_idi_open(struct inode *inode, struct file *file);
 static int um_idi_release(struct inode *inode, struct file *file);
 static int remove_entity(void *entity);
-static void diva_um_timer_function(unsigned long data);
+static void diva_um_timer_function(struct timer_list *t);
 
 /*
  * proc entry
@@ -300,8 +300,7 @@ static int um_idi_open_adapter(struct file *file, int adapter_nr)
        p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
        init_waitqueue_head(&p_os->read_wait);
        init_waitqueue_head(&p_os->close_wait);
-       setup_timer(&p_os->diva_timer_id, (void *)diva_um_timer_function,
-                   (unsigned long)p_os);
+       timer_setup(&p_os->diva_timer_id, diva_um_timer_function, 0);
        p_os->aborted = 0;
        p_os->adapter_nr = adapter_nr;
        return (1);
@@ -457,9 +456,9 @@ void diva_os_wakeup_close(void *os_context)
 }
 
 static
-void diva_um_timer_function(unsigned long data)
+void diva_um_timer_function(struct timer_list *t)
 {
-       diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) data;
+       diva_um_idi_os_context_t *p_os = from_timer(p_os, t, diva_timer_id);
 
        p_os->aborted = 1;
        wake_up_interruptible(&p_os->read_wait);
index 3cf07b8ced1c067c43c3aeae463e306ae1b22c99..4d85645c87f78721a83fcef94be1feb3bce8c094 100644 (file)
@@ -2855,7 +2855,7 @@ irq_notforus:
  */
 
 static void
-hfcmulti_dbusy_timer(struct hfc_multi *hc)
+hfcmulti_dbusy_timer(struct timer_list *t)
 {
 }
 
@@ -3877,8 +3877,7 @@ hfcmulti_initmode(struct dchannel *dch)
                if (hc->dnum[pt]) {
                        mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
                                      -1, 0, -1, 0);
-                       setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
-                                   (long)dch);
+                       timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
                }
                for (i = 1; i <= 31; i++) {
                        if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
@@ -3984,8 +3983,7 @@ hfcmulti_initmode(struct dchannel *dch)
                hc->chan[i].slot_rx = -1;
                hc->chan[i].conf = -1;
                mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
-               setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
-                           (long)dch);
+               timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
                hc->chan[i - 2].slot_tx = -1;
                hc->chan[i - 2].slot_rx = -1;
                hc->chan[i - 2].conf = -1;
index e4ebbee863a17442e96cd6e3e5bb3c9041e5d6da..34c93874af23bc43565119eab4b8b9b5d84a0815 100644 (file)
@@ -301,8 +301,9 @@ reset_hfcpci(struct hfc_pci *hc)
  * Timer function called when kernel timer expires
  */
 static void
-hfcpci_Timer(struct hfc_pci *hc)
+hfcpci_Timer(struct timer_list *t)
 {
+       struct hfc_pci *hc = from_timer(hc, t, hw.timer);
        hc->hw.timer.expires = jiffies + 75;
        /* WD RESET */
 /*
@@ -1241,7 +1242,7 @@ hfcpci_int(int intno, void *dev_id)
  * timer callback for D-chan busy resolution. Currently no function
  */
 static void
-hfcpci_dbusy_timer(struct hfc_pci *hc)
+hfcpci_dbusy_timer(struct timer_list *t)
 {
 }
 
@@ -1717,8 +1718,7 @@ static void
 inithfcpci(struct hfc_pci *hc)
 {
        printk(KERN_DEBUG "inithfcpci: entered\n");
-       setup_timer(&hc->dch.timer, (void *)hfcpci_dbusy_timer,
-                   (long)&hc->dch);
+       timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
        hc->chanlimit = 2;
        mode_hfcpci(&hc->bch[0], 1, -1);
        mode_hfcpci(&hc->bch[1], 2, -1);
@@ -2043,7 +2043,7 @@ setup_hw(struct hfc_pci *hc)
        Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
        /* At this point the needed PCI config is done */
        /* fifos are still not enabled */
-       setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc);
+       timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
        /* default PCM master */
        test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
        return 0;
index 5b078591b6ee846455ac25642c25c8d932bde99d..b791688d0228ccb8921b57775bf6330adb8a96ea 100644 (file)
@@ -1146,9 +1146,9 @@ mISDNisar_irq(struct isar_hw *isar)
 EXPORT_SYMBOL(mISDNisar_irq);
 
 static void
-ftimer_handler(unsigned long data)
+ftimer_handler(struct timer_list *t)
 {
-       struct isar_ch *ch = (struct isar_ch *)data;
+       struct isar_ch *ch = from_timer(ch, t, ftimer);
 
        pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags);
        test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags);
@@ -1635,11 +1635,9 @@ init_isar(struct isar_hw *isar)
        }
        if (isar->version != 1)
                return -EINVAL;
-       setup_timer(&isar->ch[0].ftimer, &ftimer_handler,
-                   (long)&isar->ch[0]);
+       timer_setup(&isar->ch[0].ftimer, ftimer_handler, 0);
        test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
-       setup_timer(&isar->ch[1].ftimer, &ftimer_handler,
-                   (long)&isar->ch[1]);
+       timer_setup(&isar->ch[1].ftimer, ftimer_handler, 0);
        test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
        return 0;
 }
index 62f9c43e2377b0cfb9c3f9771d019b588033c898..74c871495e814ac1f0a7c4cfa5906a8381305dc5 100644 (file)
@@ -348,7 +348,7 @@ int setup_asuscom(struct IsdnCard *card)
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
                                        card->para[0] = pnp_irq(pnp_d, 0);
-                                       if (!card->para[0] || !card->para[1]) {
+                                       if (card->para[0] == -1 || !card->para[1]) {
                                                printk(KERN_ERR "AsusPnP:some resources are missing %ld/%lx\n",
                                                       card->para[0], card->para[1]);
                                                pnp_disable_dev(pnp_d);
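
The setup_asuscom() hunk above is the first of a series of near-identical HiSax PnP fixes: pnp_irq() reports a missing IRQ resource as -1, not 0, so the old "!card->para[0]" test never caught the failure (and would have rejected a board legitimately assigned IRQ 0). A condensed, self-contained sketch of the corrected check (the helper name is illustrative):

#include <linux/pnp.h>
#include <linux/types.h>

/* true when the PnP device has both an I/O port and an IRQ assigned */
static bool example_pnp_resources_ok(struct pnp_dev *pnp_d)
{
        unsigned long io = pnp_port_start(pnp_d, 0);
        int irq = pnp_irq(pnp_d, 0);    /* -1 when no IRQ was assigned */

        return irq != -1 && io != 0;
}

Essentially the same check recurs in the Fritz, Diva, Elsa, HFC, ISurf, ITK, Niccy, Sedlbauer and Teles hunks that follow.
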
index daf3742cdef650002eaa76924e681b9f4a04ec74..a18b605fb4f23e97d863b6bc64db924ad58053eb 100644 (file)
@@ -805,7 +805,7 @@ static int avm_pnp_setup(struct IsdnCardState *cs)
                        cs->hw.avm.cfg_reg =
                                pnp_port_start(pnp_avm_d, 0);
                        cs->irq = pnp_irq(pnp_avm_d, 0);
-                       if (!cs->irq) {
+                       if (cs->irq == -1) {
                                printk(KERN_ERR "FritzPnP:No IRQ\n");
                                return (0);
                        }
index 38bdd3f7b9600a4506a7c5d46d838780016fc65c..d23df7a7784df324d63567bd481e109d396d95ce 100644 (file)
@@ -1093,7 +1093,7 @@ static int setup_diva_isapnp(struct IsdnCard *card)
                                }
                                card->para[1] = pnp_port_start(pnp_d, 0);
                                card->para[0] = pnp_irq(pnp_d, 0);
-                               if (!card->para[0] || !card->para[1]) {
+                               if (card->para[0] == -1 || !card->para[1]) {
                                        printk(KERN_ERR "Diva PnP:some resources are missing %ld/%lx\n",
                                               card->para[0], card->para[1]);
                                        pnp_disable_dev(pnp_d);
index b21c05820f4496cd6ea84f18733fae0567c519ec..0754c074379056ceeaa5cd07efb9e39dc81d1f6e 100644 (file)
@@ -945,7 +945,7 @@ static int setup_elsa_isapnp(struct IsdnCard *card)
                                        card->para[1] = pnp_port_start(pnp_d, 0);
                                        card->para[0] = pnp_irq(pnp_d, 0);
 
-                                       if (!card->para[0] || !card->para[1]) {
+                                       if (card->para[0] == -1 || !card->para[1]) {
                                                printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n",
                                                       card->para[0], card->para[1]);
                                                pnp_disable_dev(pnp_d);
index d925f579bc802cf54aad8ec09cb0cf1f48dc94f2..4d3b4b2f2612231c345e751b642e691d5bd51c87 100644 (file)
@@ -1423,7 +1423,7 @@ int setup_hfcsx(struct IsdnCard *card)
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
                                        card->para[0] = pnp_irq(pnp_d, 0);
-                                       if (!card->para[0] || !card->para[1]) {
+                                       if (card->para[0] == -1 || !card->para[1]) {
                                                printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
                                                       card->para[0], card->para[1]);
                                                pnp_disable_dev(pnp_d);
index 380bbeda9c74f85872dc7e9929b12ba54b572a57..91b5219499ca362b95339323a74de45dcf506944 100644 (file)
@@ -196,7 +196,7 @@ int setup_hfcs(struct IsdnCard *card)
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
                                        card->para[0] = pnp_irq(pnp_d, 0);
-                                       if (!card->para[0] || !card->para[1]) {
+                                       if (card->para[0] == -1 || !card->para[1]) {
                                                printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
                                                       card->para[0], card->para[1]);
                                                pnp_disable_dev(pnp_d);
index e4f7573ba9bf6777c5fcd8548e1fbdb298f3a499..7a7137d8664b21e4f2c974706c4267c203dff0a1 100644 (file)
@@ -940,6 +940,8 @@ static int fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        }
        adapter->io = pnp_port_start(pdev, 0);
        adapter->irq = pnp_irq(pdev, 0);
+       if (!adapter->io || adapter->irq == -1)
+               goto err_free;
 
        printk(KERN_INFO "hisax_fcpcipnp: found adapter %s at IO %#x irq %d\n",
               (char *) dev_id->driver_data, adapter->io, adapter->irq);
index 1399ddd4f6cb3b62bdae723b63e0ffe86461a615..53e299be430403c5ac71781fe1b785cdc4507ad5 100644 (file)
@@ -238,7 +238,7 @@ int setup_isurf(struct IsdnCard *card)
                                cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
                                cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
                                cs->irq = pnp_irq(pnp_d, 0);
-                               if (!cs->irq || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
+                               if (cs->irq == -1 || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
                                        printk(KERN_ERR "ISurfPnP:some resources are missing %d/%x/%lx\n",
                                               cs->irq, cs->hw.isurf.reset, cs->hw.isurf.phymem);
                                        pnp_disable_dev(pnp_d);
index 7ae39f5e865defc018a81aa555787bba5b78f07b..bfb79f3f0a4923a6674e9cf53d2c8d270071abdc 100644 (file)
@@ -256,7 +256,7 @@ int setup_ix1micro(struct IsdnCard *card)
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
                                        card->para[0] = pnp_irq(pnp_d, 0);
-                                       if (!card->para[0] || !card->para[1]) {
+                                       if (card->para[0] == -1 || !card->para[1]) {
                                                printk(KERN_ERR "ITK PnP:some resources are missing %ld/%lx\n",
                                                       card->para[0], card->para[1]);
                                                pnp_disable_dev(pnp_d);
index e4c33cfe3ef489f7a26eefcebdba625e039cb9d5..dfbcd2eaa81a3a6ebfb600418a68fbaa02c92e4a 100644 (file)
@@ -261,7 +261,7 @@ int setup_niccy(struct IsdnCard *card)
                        card->para[1] = pnp_port_start(pnp_d, 0);
                        card->para[2] = pnp_port_start(pnp_d, 1);
                        card->para[0] = pnp_irq(pnp_d, 0);
-                       if (!card->para[0] || !card->para[1] ||
+                       if (card->para[0] == -1 || !card->para[1] ||
                            !card->para[2]) {
                                printk(KERN_ERR "NiccyPnP:some resources are "
                                       "missing %ld/%lx/%lx\n",
index f16a47bcef485cc41dcf688e2d87a48ef1b7aa87..c0b97b893495a77332d7a8fc13d7fc41803b5c4d 100644 (file)
@@ -558,7 +558,7 @@ static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
                                card->para[1] = pnp_port_start(pnp_d, 0);
                                card->para[0] = pnp_irq(pnp_d, 0);
 
-                               if (!card->para[0] || !card->para[1]) {
+                               if (card->para[0] == -1 || !card->para[1]) {
                                        printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
                                               card->para[0], card->para[1]);
                                        pnp_disable_dev(pnp_d);
index 38fb2c1a3f0f48cdbbf3511a467d09e16b042e33..1eef693f04f038c3032efdb6d49a204a2ff6721a 100644 (file)
@@ -306,7 +306,7 @@ int setup_teles3(struct IsdnCard *card)
                                        card->para[2] = pnp_port_start(pnp_d, 1);
                                        card->para[1] = pnp_port_start(pnp_d, 0);
                                        card->para[0] = pnp_irq(pnp_d, 0);
-                                       if (!card->para[0] || !card->para[1] || !card->para[2]) {
+                                       if (card->para[0] == -1 || !card->para[1] || !card->para[2]) {
                                                printk(KERN_ERR "Teles PnP:some resources are missing %ld/%lx/%lx\n",
                                                       card->para[0], card->para[1], card->para[2]);
                                                pnp_disable_dev(pnp_d);
index 38a5bb764c7b55cb8b742639e49756e413b4ab26..8b03d618185e3c7c0c74c7dc8bad6fe1169f7e19 100644 (file)
@@ -231,7 +231,7 @@ static int isdn_timer_cnt2 = 0;
 static int isdn_timer_cnt3 = 0;
 
 static void
-isdn_timer_funct(ulong dummy)
+isdn_timer_funct(struct timer_list *unused)
 {
        int tf = dev->tflags;
        if (tf & ISDN_TIMER_FAST) {
@@ -2294,8 +2294,7 @@ static int __init isdn_init(void)
                printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");
                return -EIO;
        }
-       init_timer(&dev->timer);
-       dev->timer.function = isdn_timer_funct;
+       timer_setup(&dev->timer, isdn_timer_funct, 0);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->timerlock);
 #ifdef MODULE
index f63a110b7bcb2d2257869484bd9b894d94b9b5d2..c138f66f26595bcfe714f1b3e1f0838ef4ab6c3f 100644 (file)
@@ -1509,9 +1509,9 @@ static int isdn_net_ioctl(struct net_device *dev,
 
 /* called via cisco_timer.function */
 static void
-isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
+isdn_net_ciscohdlck_slarp_send_keepalive(struct timer_list *t)
 {
-       isdn_net_local *lp = (isdn_net_local *) data;
+       isdn_net_local *lp = from_timer(lp, t, cisco_timer);
        struct sk_buff *skb;
        unsigned char *p;
        unsigned long last_cisco_myseq = lp->cisco_myseq;
@@ -1615,9 +1615,8 @@ isdn_net_ciscohdlck_connected(isdn_net_local *lp)
        /* send slarp request because interface/seq.no.s reset */
        isdn_net_ciscohdlck_slarp_send_request(lp);
 
-       init_timer(&lp->cisco_timer);
-       lp->cisco_timer.data = (unsigned long) lp;
-       lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
+       timer_setup(&lp->cisco_timer,
+                   isdn_net_ciscohdlck_slarp_send_keepalive, 0);
        lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
        add_timer(&lp->cisco_timer);
 }
index cd2b3c69771a24b62a7952a8e7c626fde5459c6b..e07aefb9151ded8b057715ec689286d35ef09733 100644 (file)
@@ -50,7 +50,7 @@ static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is);
 static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
 static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
                                          unsigned char id);
-static void isdn_ppp_ccp_timer_callback(unsigned long closure);
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t);
 static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
                                                                   unsigned char id);
 static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
@@ -2327,10 +2327,10 @@ static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
 
 /* The timer callback function which is called when a ResetReq has timed out,
    aka has never been answered by a ResetAck */
-static void isdn_ppp_ccp_timer_callback(unsigned long closure)
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t)
 {
        struct ippp_ccp_reset_state *rs =
-               (struct ippp_ccp_reset_state *)closure;
+               from_timer(rs, t, timer);
 
        if (!rs) {
                printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
@@ -2376,8 +2376,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
                rs->state = CCPResetIdle;
                rs->is = is;
                rs->id = id;
-               setup_timer(&rs->timer, isdn_ppp_ccp_timer_callback,
-                           (unsigned long)rs);
+               timer_setup(&rs->timer, isdn_ppp_ccp_timer_callback, 0);
                is->reset->rs[id] = rs;
        }
        return rs;
index d30130c8d0f3d356b25aad643fc4c5736e24692e..960f26348bb58e00f81166444a53d7544f8e7f68 100644 (file)
@@ -541,9 +541,9 @@ isdn_tty_senddown(modem_info *info)
  * into the tty's buffer.
  */
 static void
-isdn_tty_modem_do_ncarrier(unsigned long data)
+isdn_tty_modem_do_ncarrier(struct timer_list *t)
 {
-       modem_info *info = (modem_info *) data;
+       modem_info *info = from_timer(info, t, nc_timer);
        isdn_tty_modem_result(RESULT_NO_CARRIER, info);
 }
 
@@ -1812,8 +1812,7 @@ isdn_tty_modem_init(void)
                info->isdn_channel = -1;
                info->drv_index = -1;
                info->xmit_size = ISDN_SERIAL_XMIT_SIZE;
-               setup_timer(&info->nc_timer, isdn_tty_modem_do_ncarrier,
-                           (unsigned long)info);
+               timer_setup(&info->nc_timer, isdn_tty_modem_do_ncarrier, 0);
                skb_queue_head_init(&info->xmit_queue);
 #ifdef CONFIG_ISDN_AUDIO
                skb_queue_head_init(&info->dtmf_queue);
index ce90213a42faea35805a5d964739a54c835a00d3..76516ee84e9adb63ecc6c670aad17f30edc72fb2 100644 (file)
@@ -270,9 +270,9 @@ static void pblk_write_kick(struct pblk *pblk)
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
 }
 
-void pblk_write_timer_fn(unsigned long data)
+void pblk_write_timer_fn(struct timer_list *t)
 {
-       struct pblk *pblk = (struct pblk *)data;
+       struct pblk *pblk = from_timer(pblk, t, wtimer);
 
        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
index 00d5698d64a9a58852b2f91dcc1d4bac6c395320..9c8e114c8a545ca45cbea56660a2838f4a67e812 100644 (file)
@@ -442,9 +442,9 @@ next_gc_group:
                goto next_gc_group;
 }
 
-static void pblk_gc_timer(unsigned long data)
+static void pblk_gc_timer(struct timer_list *t)
 {
-       struct pblk *pblk = (struct pblk *)data;
+       struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
 
        pblk_gc_kick(pblk);
 }
@@ -601,7 +601,7 @@ int pblk_gc_init(struct pblk *pblk)
                goto fail_free_writer_kthread;
        }
 
-       setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
+       timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
 
        gc->gc_active = 0;
index f62112ba5482a345e33fa4bddb537b25d3c2db95..695826a06b5d2f87150f63348db49c67d96f38eb 100644 (file)
@@ -866,7 +866,7 @@ fail:
 
 static int pblk_writer_init(struct pblk *pblk)
 {
-       setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
+       timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
 
        pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
index abae31fd434e5bd667e3ba761ace2189f250397a..dacc71922260b8bd2ccb80e2732338b09a73bb2d 100644 (file)
@@ -158,9 +158,9 @@ int pblk_rl_max_io(struct pblk_rl *rl)
        return rl->rb_max_io;
 }
 
-static void pblk_rl_u_timer(unsigned long data)
+static void pblk_rl_u_timer(struct timer_list *t)
 {
-       struct pblk_rl *rl = (struct pblk_rl *)data;
+       struct pblk_rl *rl = from_timer(rl, t, u_timer);
 
        /* Release user I/O state. Protect from GC */
        smp_store_release(&rl->rb_user_active, 0);
@@ -202,7 +202,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
        atomic_set(&rl->rb_gc_cnt, 0);
        atomic_set(&rl->rb_space, -1);
 
-       setup_timer(&rl->u_timer, pblk_rl_u_timer, (unsigned long)rl);
+       timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
 
        rl->rb_user_active = 0;
        rl->rb_gc_active = 0;
index 90961033a79fcd2af5c8ec425aec3978c29e9fa5..59a64d461a5dcf1e25c07000293fab0a7642a7ee 100644 (file)
@@ -797,7 +797,7 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
  * pblk write thread
  */
 int pblk_write_ts(void *data);
-void pblk_write_timer_fn(unsigned long data);
+void pblk_write_timer_fn(struct timer_list *t);
 void pblk_write_should_kick(struct pblk *pblk);
 
 /*
index 267f01ae87e447b6fba7e2fe47f39a1a926416f4..0993c14be86011c63d91373bada4c9e8363865ec 100644 (file)
@@ -267,9 +267,9 @@ static void rrpc_gc_kick(struct rrpc *rrpc)
 /*
  * timed GC every interval.
  */
-static void rrpc_gc_timer(unsigned long data)
+static void rrpc_gc_timer(struct timer_list *t)
 {
-       struct rrpc *rrpc = (struct rrpc *)data;
+       struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
 
        rrpc_gc_kick(rrpc);
        mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
@@ -1063,7 +1063,7 @@ static int rrpc_gc_init(struct rrpc *rrpc)
        if (!rrpc->kgc_wq)
                return -ENOMEM;
 
-       setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
+       timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
 
        return 0;
 }
index ce8d78c137f05264763296fc45666c3cb230378c..e1d369b976edc836f4c4ac2b4326faf01eda24d6 100644 (file)
@@ -402,7 +402,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
                            sizeof(struct saa7146_buf),
                            file, &dev->v4l2_lock);
 
-       vv->vbi_read_timeout.function = (TIMER_FUNC_TYPE)vbi_read_timeout;
+       vv->vbi_read_timeout.function = vbi_read_timeout;
        vv->vbi_read_timeout_file = file;
 
        /* initialize the brs */
index fb43025df57379834a99e1be3d6124c827cd91a9..dba21215dc845df2aec191127bf59502bdc903cd 100644 (file)
@@ -339,9 +339,9 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
        }
 }
 
-static void viu_vid_timeout(unsigned long data)
+static void viu_vid_timeout(struct timer_list *t)
 {
-       struct viu_dev *dev = (struct viu_dev *)data;
+       struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
        struct viu_buf *buf;
        struct viu_dmaqueue *vidq = &dev->vidq;
 
@@ -1466,8 +1466,7 @@ static int viu_of_probe(struct platform_device *op)
        viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
                        "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
 
-       setup_timer(&viu_dev->vidq.timeout, viu_vid_timeout,
-                   (unsigned long)viu_dev);
+       timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0);
        viu_dev->std = V4L2_STD_NTSC_M;
        viu_dev->first = 1;
 
index 1839a86cc2a557dde0dd590eda4dadf1bc5d0815..bc68dbbcaec1667e29088ecb35f17944ec1ba85f 100644 (file)
@@ -145,9 +145,9 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
        }
 }
 
-static void s5p_mfc_watchdog(unsigned long arg)
+static void s5p_mfc_watchdog(struct timer_list *t)
 {
-       struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+       struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
 
        if (test_bit(0, &dev->hw_lock))
                atomic_inc(&dev->watchdog_cnt);
@@ -1314,9 +1314,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
        dev->hw_lock = 0;
        INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
        atomic_set(&dev->watchdog_cnt, 0);
-       init_timer(&dev->watchdog_timer);
-       dev->watchdog_timer.data = (unsigned long)dev;
-       dev->watchdog_timer.function = s5p_mfc_watchdog;
+       timer_setup(&dev->watchdog_timer, s5p_mfc_watchdog, 0);
 
        ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
        if (ret)
index 59280ac319374c11103a30ec6123a8f3192a1577..a0acee7671b16170ab9f19f430349d1aec00aeba 100644 (file)
@@ -61,9 +61,9 @@ static int load_c8sectpfe_fw(struct c8sectpfei *fei);
 
 #define FIFO_LEN 1024
 
-static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
+static void c8sectpfe_timer_interrupt(struct timer_list *t)
 {
-       struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
+       struct c8sectpfei *fei = from_timer(fei, t, timer);
        struct channel_info *channel;
        int chan_num;
 
@@ -865,8 +865,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
        }
 
        /* Setup timer interrupt */
-       setup_timer(&fei->timer, c8sectpfe_timer_interrupt,
-                   (unsigned long)fei);
+       timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
 
        mutex_init(&fei->lock);
 
index b01fba020d5f7dba97a6e23bda9763b411976a8d..7bf9fa2f8534bf456e0f1992f014247cc119fbc1 100644 (file)
@@ -388,9 +388,9 @@ static void device_run(void *priv)
        schedule_irq(dev, ctx->transtime);
 }
 
-static void device_isr(unsigned long priv)
+static void device_isr(struct timer_list *t)
 {
-       struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
+       struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
        struct vim2m_ctx *curr_ctx;
        struct vb2_v4l2_buffer *src_vb, *dst_vb;
        unsigned long flags;
@@ -1024,7 +1024,7 @@ static int vim2m_probe(struct platform_device *pdev)
        v4l2_info(&dev->v4l2_dev,
                        "Device registered as /dev/video%d\n", vfd->num);
 
-       setup_timer(&dev->timer, device_isr, (long)dev);
+       timer_setup(&dev->timer, device_isr, 0);
        platform_set_drvdata(pdev, dev);
 
        dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
index 34dc7e062471c651192f7d5a01ff2780fc28f64e..d9093a3c57c5b275ad4e0956c17832f31e023e64 100644 (file)
@@ -105,9 +105,9 @@ static struct tda18271_config hauppauge_woodbury_tunerconfig = {
 
 static void au0828_restart_dvb_streaming(struct work_struct *work);
 
-static void au0828_bulk_timeout(unsigned long data)
+static void au0828_bulk_timeout(struct timer_list *t)
 {
-       struct au0828_dev *dev = (struct au0828_dev *) data;
+       struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);
 
        dprintk(1, "%s called\n", __func__);
        dev->bulk_timeout_running = 0;
@@ -648,9 +648,7 @@ int au0828_dvb_register(struct au0828_dev *dev)
                return ret;
        }
 
-       dev->bulk_timeout.function = au0828_bulk_timeout;
-       dev->bulk_timeout.data = (unsigned long) dev;
-       init_timer(&dev->bulk_timeout);
+       timer_setup(&dev->bulk_timeout, au0828_bulk_timeout, 0);
 
        return 0;
 }
index 654f67c258635152cf4d4ca7f6f12228f52cdc8e..a240153821e0cd803652804a25f0e998c207305a 100644 (file)
@@ -954,9 +954,9 @@ int au0828_analog_unregister(struct au0828_dev *dev)
 /* This function ensures that video frames continue to be delivered even if
    the ITU-656 input isn't receiving any data (thereby preventing applications
    such as tvtime from hanging) */
-static void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(struct timer_list *t)
 {
-       struct au0828_dev *dev = (struct au0828_dev *) data;
+       struct au0828_dev *dev = from_timer(dev, t, vid_timeout);
        struct au0828_dmaqueue *dma_q = &dev->vidq;
        struct au0828_buffer *buf;
        unsigned char *vid_data;
@@ -978,9 +978,9 @@ static void au0828_vid_buffer_timeout(unsigned long data)
        spin_unlock_irqrestore(&dev->slock, flags);
 }
 
-static void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(struct timer_list *t)
 {
-       struct au0828_dev *dev = (struct au0828_dev *) data;
+       struct au0828_dev *dev = from_timer(dev, t, vbi_timeout);
        struct au0828_dmaqueue *dma_q = &dev->vbiq;
        struct au0828_buffer *buf;
        unsigned char *vbi_data;
@@ -1953,10 +1953,8 @@ int au0828_analog_register(struct au0828_dev *dev,
        INIT_LIST_HEAD(&dev->vidq.active);
        INIT_LIST_HEAD(&dev->vbiq.active);
 
-       setup_timer(&dev->vid_timeout, au0828_vid_buffer_timeout,
-                   (unsigned long)dev);
-       setup_timer(&dev->vbi_timeout, au0828_vbi_buffer_timeout,
-                   (unsigned long)dev);
+       timer_setup(&dev->vid_timeout, au0828_vid_buffer_timeout, 0);
+       timer_setup(&dev->vbi_timeout, au0828_vbi_buffer_timeout, 0);
 
        dev->width = NTSC_STD_W;
        dev->height = NTSC_STD_H;
index 0b5c43f7e020da59c939369ebd9d0a27a116ce2b..f412429cf5ba586958a3693a35eee48ad7043c51 100644 (file)
@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
        dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
                data, size, dma->nr_pages);
 
-       err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+       err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
                             flags, dma->pages, NULL);
 
        if (err != dma->nr_pages) {
                dma->nr_pages = (err >= 0) ? err : 0;
-               dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+               dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+                       dma->nr_pages);
                return err < 0 ? err : -EINVAL;
        }
        return 0;
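
The videobuf DMA hunk switches to get_user_pages_longterm() because the pinned pages back a long-lived DMA mapping rather than a transient access; the longterm variant is meant for pins whose lifetime is not bounded by the syscall and can refuse mappings (such as filesystem DAX) that cannot tolerate them. The call shape is otherwise identical to get_user_pages(). A hedged sketch (example_pin is illustrative; as with get_user_pages(), the caller is assumed to hold mmap_sem):

#include <linux/errno.h>
#include <linux/mm.h>

static int example_pin(unsigned long uaddr, long nr_pages,
                       unsigned int gup_flags, struct page **pages)
{
        long pinned;

        pinned = get_user_pages_longterm(uaddr & PAGE_MASK, nr_pages,
                                         gup_flags, pages, NULL);
        if (pinned != nr_pages)
                return pinned < 0 ? pinned : -EINVAL;

        return 0;
}
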
index 22de7f5ed03236cda482dc8e994471522b6b39b5..57b13dfbd21e2d3ba14d46b6d7288fc12ca23898 100644 (file)
@@ -1492,9 +1492,9 @@ static int msb_ftl_scan(struct msb_data *msb)
        return 0;
 }
 
-static void msb_cache_flush_timer(unsigned long data)
+static void msb_cache_flush_timer(struct timer_list *t)
 {
-       struct msb_data *msb = (struct msb_data *)data;
+       struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
        msb->need_flush_cache = true;
        queue_work(msb->io_queue, &msb->io_work);
 }
@@ -1514,8 +1514,7 @@ static void msb_cache_discard(struct msb_data *msb)
 
 static int msb_cache_init(struct msb_data *msb)
 {
-       setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
-               (unsigned long)msb);
+       timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
 
        if (!msb->cache)
                msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
index 691dab791f7af81d91ed3892a945c74c1f0d3fa1..59d61b04c197b64e1735c8641a673a19fefc0c7e 100644 (file)
@@ -40,9 +40,9 @@ static const struct mfd_cell rtsx_usb_cells[] = {
        },
 };
 
-static void rtsx_usb_sg_timed_out(unsigned long data)
+static void rtsx_usb_sg_timed_out(struct timer_list *t)
 {
-       struct rtsx_ucr *ucr = (struct rtsx_ucr *)data;
+       struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
 
        dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
        usb_sg_cancel(&ucr->current_sg);
@@ -663,7 +663,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
                goto out_init_fail;
 
        /* initialize USB SG transfer timer */
-       setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr);
+       timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
 
        ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
                                      ARRAY_SIZE(rtsx_usb_cells));
index b0f7af872bb56c40759201c363a0b3812c30b04b..7eebbdfbcacd0a51aef6f1d0c70e794b45ac3c03 100644 (file)
@@ -63,9 +63,11 @@ void lkdtm_BUG(void)
        BUG();
 }
 
+static int warn_counter;
+
 void lkdtm_WARNING(void)
 {
-       WARN_ON(1);
+       WARN(1, "Warning message trigger count: %d\n", warn_counter++);
 }
 
 void lkdtm_EXCEPTION(void)
index 35a9e4fd1a9f514ae61e4ec77455915e4870009e..64b03d6eaf184a30c133440299f5820943c8b2bf 100644 (file)
@@ -160,9 +160,9 @@ out:
        return err;
 }
 
-static void mmc_retune_timer(unsigned long data)
+static void mmc_retune_timer(struct timer_list *t)
 {
-       struct mmc_host *host = (struct mmc_host *)data;
+       struct mmc_host *host = from_timer(host, t, retune_timer);
 
        mmc_retune_needed(host);
 }
@@ -389,7 +389,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        init_waitqueue_head(&host->wq);
        INIT_DELAYED_WORK(&host->detect, mmc_rescan);
        INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
-       setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
+       timer_setup(&host->retune_timer, mmc_retune_timer, 0);
 
        /*
         * By default, hosts do not support SGIO or large requests.
index 5a2d71729b9ace7a67e8216f7d5ba61fddb471d7..2a8ac6829d42b187e20f6f2a7c80b400d19a423b 100644 (file)
@@ -1,6 +1,5 @@
 menuconfig MTD
        tristate "Memory Technology Device (MTD) support"
-       depends on GENERIC_IO
        help
          Memory Technology Devices are flash, RAM and similar chips, often
          used for solid state file systems on embedded devices. This option
index afb43d5e178269d33443a59a439245263cb263c0..1cd0fff0e9402d9c1ee477674be5b90611dcb561 100644 (file)
@@ -20,8 +20,9 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
 static int mapram_erase (struct mtd_info *, struct erase_info *);
 static void mapram_nop (struct mtd_info *);
 static struct mtd_info *map_ram_probe(struct map_info *map);
-static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long,
-                                         unsigned long, unsigned long);
+static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
+                        size_t *retlen, void **virt, resource_size_t *phys);
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
 
 
 static struct mtd_chip_driver mapram_chipdrv = {
@@ -65,11 +66,12 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
        mtd->type = MTD_RAM;
        mtd->size = map->size;
        mtd->_erase = mapram_erase;
-       mtd->_get_unmapped_area = mapram_unmapped_area;
        mtd->_read = mapram_read;
        mtd->_write = mapram_write;
        mtd->_panic_write = mapram_write;
+       mtd->_point = mapram_point;
        mtd->_sync = mapram_nop;
+       mtd->_unpoint = mapram_unpoint;
        mtd->flags = MTD_CAP_RAM;
        mtd->writesize = 1;
 
@@ -81,19 +83,23 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
        return mtd;
 }
 
-
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long mapram_unmapped_area(struct mtd_info *mtd,
-                                         unsigned long len,
-                                         unsigned long offset,
-                                         unsigned long flags)
+static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
+                       size_t *retlen, void **virt, resource_size_t *phys)
 {
        struct map_info *map = mtd->priv;
-       return (unsigned long) map->virt + offset;
+
+       if (!map->virt)
+               return -EINVAL;
+       *virt = map->virt + from;
+       if (phys)
+               *phys = map->phys + from;
+       *retlen = len;
+       return 0;
+}
+
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+       return 0;
 }
 
 static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
index e67f73ab44c9db23eae052c206139a03aafe15b9..20e3604b4d7169fef2c65daeba87dbe74b502baf 100644 (file)
@@ -20,8 +20,10 @@ static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
 static void maprom_nop (struct mtd_info *);
 static struct mtd_info *map_rom_probe(struct map_info *map);
 static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
-static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long,
-                                         unsigned long, unsigned long);
+static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len,
+                        size_t *retlen, void **virt, resource_size_t *phys);
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
 
 static struct mtd_chip_driver maprom_chipdrv = {
        .probe  = map_rom_probe,
@@ -51,7 +53,8 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
        mtd->name = map->name;
        mtd->type = MTD_ROM;
        mtd->size = map->size;
-       mtd->_get_unmapped_area = maprom_unmapped_area;
+       mtd->_point = maprom_point;
+       mtd->_unpoint = maprom_unpoint;
        mtd->_read = maprom_read;
        mtd->_write = maprom_write;
        mtd->_sync = maprom_nop;
@@ -66,18 +69,23 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
 }
 
 
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long maprom_unmapped_area(struct mtd_info *mtd,
-                                         unsigned long len,
-                                         unsigned long offset,
-                                         unsigned long flags)
+static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len,
+                       size_t *retlen, void **virt, resource_size_t *phys)
 {
        struct map_info *map = mtd->priv;
-       return (unsigned long) map->virt + offset;
+
+       if (!map->virt)
+               return -EINVAL;
+       *virt = map->virt + from;
+       if (phys)
+               *phys = map->phys + from;
+       *retlen = len;
+       return 0;
+}
+
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+       return 0;
 }
 
 static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
index 84b16133554bebf60f4e42c84bc1fd4274c477a8..0806f72102c09d03469c8124b75e699333fca652 100644 (file)
@@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
        struct dentry *root = floor->dbg.dfs_dir;
        struct docg3 *docg3 = floor->priv;
 
-       if (IS_ERR_OR_NULL(root))
+       if (IS_ERR_OR_NULL(root)) {
+               if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+                   !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+                       dev_warn(floor->dev.parent,
+                                "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
                return;
+       }
 
        debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
                            &flashcontrol_fops);
index 268aae45b5149de12ff77e59df776790724dede8..555b94406e0bb31a83c92fd03a70250cce015842 100644 (file)
@@ -583,7 +583,7 @@ static struct mtd_erase_region_info erase_regions[] = {
        }
 };
 
-static struct mtd_partition lart_partitions[] = {
+static const struct mtd_partition lart_partitions[] = {
        /* blob */
        {
                .name   = "blob",
index 00eea6fd379cc68d51dbe197eec9b8b310341fdb..dbe6a1de2bb822fda74d0af92a9d08ebc7eb30ea 100644 (file)
@@ -359,6 +359,7 @@ static const struct spi_device_id m25p_ids[] = {
        {"m25p32-nonjedec"},    {"m25p64-nonjedec"},    {"m25p128-nonjedec"},
 
        /* Everspin MRAMs (non-JEDEC) */
+       { "mr25h128" }, /* 128 Kib, 40 MHz */
        { "mr25h256" }, /* 256 Kib, 40 MHz */
        { "mr25h10" },  /*   1 Mib, 40 MHz */
        { "mr25h40" },  /*   4 Mib, 40 MHz */
index cbd8547d7aad8654b14ae20e8d00a69b2c861463..0bf4aeaf0cb8cf14665ee8474c3faeb68479fd88 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/mtdram.h>
@@ -69,6 +70,27 @@ static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
 {
        *virt = mtd->priv + from;
        *retlen = len;
+
+       if (phys) {
+               /* limit retlen to the number of contiguous physical pages */
+               unsigned long page_ofs = offset_in_page(*virt);
+               void *addr = *virt - page_ofs;
+               unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);
+
+               *phys = __pfn_to_phys(pfn0) + page_ofs;
+               len += page_ofs;
+               while (len > PAGE_SIZE) {
+                       len -= PAGE_SIZE;
+                       addr += PAGE_SIZE;
+                       pfn0++;
+                       pfn1 = vmalloc_to_pfn(addr);
+                       if (pfn1 != pfn0) {
+                               *retlen = addr - *virt;
+                               break;
+                       }
+               }
+       }
+
        return 0;
 }
 
@@ -77,19 +99,6 @@ static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
        return 0;
 }
 
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
-                                          unsigned long len,
-                                          unsigned long offset,
-                                          unsigned long flags)
-{
-       return (unsigned long) mtd->priv + offset;
-}
-
 static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
                size_t *retlen, u_char *buf)
 {
@@ -134,7 +143,6 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
        mtd->_erase = ram_erase;
        mtd->_point = ram_point;
        mtd->_unpoint = ram_unpoint;
-       mtd->_get_unmapped_area = ram_get_unmapped_area;
        mtd->_read = ram_read;
        mtd->_write = ram_write;
 
index 8087c36dc6935a8d6caf6d4b770be4fba60e1319..0ec85f316d24c04e8503c8ca7625a3c16399a97b 100644 (file)
@@ -163,8 +163,9 @@ static int register_device(char *name, unsigned long start, unsigned long length
        }
 
        if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
-                               ioremap(start, length))) {
-               E("slram: ioremap failed\n");
+               memremap(start, length,
+                        MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) {
+               E("slram: memremap failed\n");
                return -EIO;
        }
        ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
@@ -186,7 +187,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
 
        if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0))   {
                E("slram: Failed to register new device\n");
-               iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
+               memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
                kfree((*curmtd)->mtdinfo->priv);
                kfree((*curmtd)->mtdinfo);
                return(-EAGAIN);
@@ -206,7 +207,7 @@ static void unregister_devices(void)
        while (slram_mtdlist) {
                nextitem = slram_mtdlist->next;
                mtd_device_unregister(slram_mtdlist->mtdinfo);
-               iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
+               memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
                kfree(slram_mtdlist->mtdinfo->priv);
                kfree(slram_mtdlist->mtdinfo);
                kfree(slram_mtdlist);
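
slram maps ordinary system RAM, so ioremap(), which is meant for device MMIO and is rejected for RAM on some architectures, is replaced by memremap(); passing MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC lets the kernel fall back across those mapping types, and the teardown becomes memunmap(). A compact sketch of the pairing (the example_* names are illustrative):

#include <linux/io.h>

static void *example_map_ram(phys_addr_t start, size_t len)
{
        /* request write-back, allowing write-through or write-combine fallback */
        return memremap(start, len, MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC);
}

static void example_unmap_ram(void *addr)
{
        memunmap(addr);
}
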
index d504b3d1791da8ef70076d2b936c6afba7cc863c..70f488628464d41682b2392c5d677cdbbde55b92 100644 (file)
@@ -61,7 +61,7 @@ static struct map_info flagadm_map = {
                .bankwidth =    2,
 };
 
-static struct mtd_partition flagadm_parts[] = {
+static const struct mtd_partition flagadm_parts[] = {
        {
                .name =         "Bootloader",
                .offset =       FLASH_PARTITION0_ADDR,
index 15bbda03be6542cd860759a79a914387c842301d..a0b8fa7849a956c31250fd6946638f3423a9b58e 100644 (file)
@@ -47,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
 /*
  * MTD partitioning stuff
  */
-static struct mtd_partition partitions[] =
+static const struct mtd_partition partitions[] =
 {
        {
                .name = "FileSystem",
index 81dc2598bc0ac9085cadf7b332543e7752be5ade..3528497f96c779e8ce059471178bb771f895c216 100644 (file)
@@ -52,7 +52,7 @@
 /* partition_info gives details on the logical partitions that the split the
  * single flash device into. If the size if zero we use up to the end of the
  * device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
     {
            .name = "NetSc520 boot kernel",
            .offset = 0,
index a577ef8553d076fb01a13b01de55fb5a672e9e77..729579fb654ffcaea4cc6afb698ff43ce337f7de 100644 (file)
@@ -107,7 +107,7 @@ static struct map_info nettel_amd_map = {
        .bankwidth = AMD_BUSWIDTH,
 };
 
-static struct mtd_partition nettel_amd_partitions[] = {
+static const struct mtd_partition nettel_amd_partitions[] = {
        {
                .name = "SnapGear BIOS config",
                .offset = 0x000e0000,
index 51572895c02cc42f8d3ad0d153625c8d3e7263ee..6d9a4d6f983949cf11692d1164b7e3456d867627 100644 (file)
@@ -43,7 +43,6 @@ struct platram_info {
        struct device           *dev;
        struct mtd_info         *mtd;
        struct map_info          map;
-       struct resource         *area;
        struct platdata_mtd_ram *pdata;
 };
 
@@ -97,16 +96,6 @@ static int platram_remove(struct platform_device *pdev)
 
        platram_setrw(info, PLATRAM_RO);
 
-       /* release resources */
-
-       if (info->area) {
-               release_resource(info->area);
-               kfree(info->area);
-       }
-
-       if (info->map.virt != NULL)
-               iounmap(info->map.virt);
-
        kfree(info);
 
        return 0;
@@ -147,12 +136,11 @@ static int platram_probe(struct platform_device *pdev)
        info->pdata = pdata;
 
        /* get the resource for the memory mapping */
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       if (res == NULL) {
-               dev_err(&pdev->dev, "no memory resource specified\n");
-               err = -ENOENT;
+       info->map.virt = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(info->map.virt)) {
+               err = PTR_ERR(info->map.virt);
+               dev_err(&pdev->dev, "failed to ioremap() region\n");
                goto exit_free;
        }
 
@@ -167,26 +155,8 @@ static int platram_probe(struct platform_device *pdev)
                        (char *)pdata->mapname : (char *)pdev->name;
        info->map.bankwidth = pdata->bankwidth;
 
-       /* register our usage of the memory area */
-
-       info->area = request_mem_region(res->start, info->map.size, pdev->name);
-       if (info->area == NULL) {
-               dev_err(&pdev->dev, "failed to request memory region\n");
-               err = -EIO;
-               goto exit_free;
-       }
-
-       /* remap the memory area */
-
-       info->map.virt = ioremap(res->start, info->map.size);
        dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
 
-       if (info->map.virt == NULL) {
-               dev_err(&pdev->dev, "failed to ioremap() region\n");
-               err = -EIO;
-               goto exit_free;
-       }
-
        simple_map_init(&info->map);
 
        dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
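
The platram rework drops the hand-rolled request_mem_region()/ioremap() pair and the matching cleanup in platram_remove(): devm_ioremap_resource() requests and maps the region in one call, is device-managed so it is released automatically when the device goes away, and signals failure with an ERR_PTR() value rather than NULL. A minimal probe-side sketch (example_probe is illustrative):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))       /* ERR_PTR, not NULL, on failure */
                return PTR_ERR(base);

        /* no iounmap()/release_resource() needed on the remove path */
        return 0;
}
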
index 556a2dfe94c586e03b363f5c2056992346df72b1..4337d279ad83629d23a4c8890176fbc30a1f4509 100644 (file)
@@ -87,7 +87,7 @@ static DEFINE_SPINLOCK(sbc_gxx_spin);
 /* partition_info gives details on the logical partitions that the split the
  * single flash device into. If the size if zero we use up to the end of the
  * device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
     { .name = "SBC-GXx flash boot partition",
       .offset = 0,
       .size =   BOOT_PARTITION_SIZE_KiB*1024 },
index 9969fedb1f13c2be53859df396823260bd1b9740..8f177e0acb8c1dbb350728eb5dda97a1168c1feb 100644 (file)
@@ -43,7 +43,7 @@ static struct map_info ts5500_map = {
        .phys = WINDOW_ADDR
 };
 
-static struct mtd_partition ts5500_partitions[] = {
+static const struct mtd_partition ts5500_partitions[] = {
        {
                .name = "Drive A",
                .offset = 0,
index 00a8190797ec1d4a445fd4bd46bfe03903699a15..aef030ca8601c709cc735a180b6c997f8b951f7c 100644 (file)
@@ -49,7 +49,7 @@ static struct mtd_info *uclinux_ram_mtdinfo;
 
 /****************************************************************************/
 
-static struct mtd_partition uclinux_romfs[] = {
+static const struct mtd_partition uclinux_romfs[] = {
        { .name = "ROMfs" }
 };
 
index d573606b91c2a57a4ff07e040b79fd86a7762c5b..60bf53df5454101a533a17b8bf46eb11164a3824 100644 (file)
@@ -643,32 +643,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
        return err;
 }
 
-/*
- * try to support NOMMU mmaps on concatenated devices
- * - we don't support subdev spanning as we can't guarantee it'll work
- */
-static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
-                                             unsigned long len,
-                                             unsigned long offset,
-                                             unsigned long flags)
-{
-       struct mtd_concat *concat = CONCAT(mtd);
-       int i;
-
-       for (i = 0; i < concat->num_subdev; i++) {
-               struct mtd_info *subdev = concat->subdev[i];
-
-               if (offset >= subdev->size) {
-                       offset -= subdev->size;
-                       continue;
-               }
-
-               return mtd_get_unmapped_area(subdev, len, offset, flags);
-       }
-
-       return (unsigned long) -ENOSYS;
-}
-
 /*
  * This function constructs a virtual MTD device by concatenating
  * num_devs MTD devices. A pointer to the new device object is
@@ -790,7 +764,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],       /* subdevices to c
        concat->mtd._unlock = concat_unlock;
        concat->mtd._suspend = concat_suspend;
        concat->mtd._resume = concat_resume;
-       concat->mtd._get_unmapped_area = concat_get_unmapped_area;
 
        /*
         * Combine the erase block size info of the subdevices:
index e7ea842ba3dbfc49f4e93d9c54b5f2b2cfc09f68..f80e911b8843819db8dcd1956c76ce2bf60b5ab8 100644 (file)
@@ -1022,11 +1022,18 @@ EXPORT_SYMBOL_GPL(mtd_unpoint);
 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
                                    unsigned long offset, unsigned long flags)
 {
-       if (!mtd->_get_unmapped_area)
-               return -EOPNOTSUPP;
-       if (offset >= mtd->size || len > mtd->size - offset)
-               return -EINVAL;
-       return mtd->_get_unmapped_area(mtd, len, offset, flags);
+       size_t retlen;
+       void *virt;
+       int ret;
+
+       ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
+       if (ret)
+               return ret;
+       if (retlen != len) {
+               mtd_unpoint(mtd, offset, retlen);
+               return -ENOSYS;
+       }
+       return (unsigned long)virt;
 }
 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
 
@@ -1093,6 +1100,39 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
 }
 EXPORT_SYMBOL_GPL(mtd_panic_write);
 
+static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
+                            struct mtd_oob_ops *ops)
+{
+       /*
+        * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
+        * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
+        *  this case.
+        */
+       if (!ops->datbuf)
+               ops->len = 0;
+
+       if (!ops->oobbuf)
+               ops->ooblen = 0;
+
+       if (offs < 0 || offs + ops->len >= mtd->size)
+               return -EINVAL;
+
+       if (ops->ooblen) {
+               u64 maxooblen;
+
+               if (ops->ooboffs >= mtd_oobavail(mtd, ops))
+                       return -EINVAL;
+
+               maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
+                             mtd_div_by_ws(offs, mtd)) *
+                            mtd_oobavail(mtd, ops)) - ops->ooboffs;
+               if (ops->ooblen > maxooblen)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
 {
        int ret_code;
@@ -1100,6 +1140,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
        if (!mtd->_read_oob)
                return -EOPNOTSUPP;
 
+       ret_code = mtd_check_oob_ops(mtd, from, ops);
+       if (ret_code)
+               return ret_code;
+
        ledtrig_mtd_activity();
        /*
         * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1119,11 +1163,18 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
                                struct mtd_oob_ops *ops)
 {
+       int ret;
+
        ops->retlen = ops->oobretlen = 0;
        if (!mtd->_write_oob)
                return -EOPNOTSUPP;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
+
+       ret = mtd_check_oob_ops(mtd, to, ops);
+       if (ret)
+               return ret;
+
        ledtrig_mtd_activity();
        return mtd->_write_oob(mtd, to, ops);
 }
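To make the new mtd_check_oob_ops() bounds check concrete, here is a hedged sketch of a caller that passes it: an OOB-only read, so datbuf stays NULL (forcing ops.len to 0) and ooblen stays within the OOB space reachable from offset 0. The function name and buffer handling are assumptions for illustration, not part of this patch:

	#include <linux/mtd/mtd.h>

	/* Sketch: read only the free OOB bytes of page 0. */
	static int read_page0_oob(struct mtd_info *mtd, u8 *oob, size_t ooblen)
	{
		struct mtd_oob_ops ops = {
			.mode	= MTD_OPS_AUTO_OOB,	/* free OOB bytes only */
			.oobbuf	= oob,
			.ooblen	= ooblen,		/* must fit the available OOB */
		};

		return mtd_read_oob(mtd, 0, &ops);
	}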
index a308e707392d595902b77a03e2078ffe8da97d9e..be088bccd593142bd063955e29bdd84a4998d087 100644 (file)
@@ -101,18 +101,6 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
        return part->parent->_unpoint(part->parent, from + part->offset, len);
 }
 
-static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
-                                           unsigned long len,
-                                           unsigned long offset,
-                                           unsigned long flags)
-{
-       struct mtd_part *part = mtd_to_part(mtd);
-
-       offset += part->offset;
-       return part->parent->_get_unmapped_area(part->parent, len, offset,
-                                               flags);
-}
-
 static int part_read_oob(struct mtd_info *mtd, loff_t from,
                struct mtd_oob_ops *ops)
 {
@@ -458,8 +446,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
                slave->mtd._unpoint = part_unpoint;
        }
 
-       if (parent->_get_unmapped_area)
-               slave->mtd._get_unmapped_area = part_get_unmapped_area;
        if (parent->_read_oob)
                slave->mtd._read_oob = part_read_oob;
        if (parent->_write_oob)
index e43fea896d1ed8437a426a5fe0db010ceff7508a..d58a61c093047d80425e6b8c75083b57646a46b4 100644 (file)
@@ -79,14 +79,14 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
        pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
              mtd->index, mtd->name);
 
-       ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+       ret = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
        if (ret < 0) {
                deactivate_locked_super(sb);
                return ERR_PTR(ret);
        }
 
        /* go */
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
        return dget(sb->s_root);
 
        /* new mountpoint for an already mounted superblock */
@@ -202,7 +202,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
 not_an_MTD_device:
 #endif /* CONFIG_BLOCK */
 
-       if (!(flags & MS_SILENT))
+       if (!(flags & SB_SILENT))
                printk(KERN_NOTICE
                       "MTD: Attempt to mount non-MTD device \"%s\"\n",
                       dev_name);
index 7d9080e33865a92df1de69a69a9c0f4c1edb18fc..f07492c6f4b2bd76227a7e4251560ca3a424ec47 100644 (file)
@@ -50,7 +50,7 @@
  * Number of free eraseblocks below which GC can also collect low frag
  * blocks.
  */
-#define LOW_FRAG_GC_TRESHOLD   5
+#define LOW_FRAG_GC_THRESHOLD  5
 
 /*
  * Wear level cost amortization. We want to do wear leveling on the background
@@ -805,7 +805,7 @@ static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
 {
        int idx, stopat;
 
-       if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
+       if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
                stopat = MTDSWAP_LOWFRAG;
        else
                stopat = MTDSWAP_HIFRAG;
index 3f2036f31da47ae7209365c31aab0bb363b6940f..bb48aafed9a2d2fcead8fd44ae5c571d7c3bb02f 100644 (file)
@@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx
        tristate "NAND support on PXA3xx and Armada 370/XP"
        depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
        help
+
          This enables the driver for the NAND flash device found on
-         PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+         PXA3xx processors (NFCv1) and also on 32-bit Armada
+         platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
+         platforms (7K, 8K) (NFCv2).
 
 config MTD_NAND_SLC_LPC32XX
        tristate "NXP LPC32xx SLC Controller"
index 6e2db700d923ddd14bdd6b0ae9cc5b554cf0ef1e..118a1349aad3a47a78242f8561d88c9ee2f2bb1d 100644 (file)
@@ -59,7 +59,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI)          += sunxi_nand.o
 obj-$(CONFIG_MTD_NAND_HISI504)         += hisi504_nand.o
 obj-$(CONFIG_MTD_NAND_BRCMNAND)                += brcmnand/
 obj-$(CONFIG_MTD_NAND_QCOM)            += qcom_nandc.o
-obj-$(CONFIG_MTD_NAND_MTK)             += mtk_nand.o mtk_ecc.o
+obj-$(CONFIG_MTD_NAND_MTK)             += mtk_ecc.o mtk_nand.o
 
 nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
 nand-objs += nand_amd.o
index dcec9cf4983f812833bc9082eee7dc383c913eff..d60ada45c549450758a1079d097b81f56271580d 100644 (file)
@@ -41,7 +41,7 @@ static struct mtd_info *ams_delta_mtd = NULL;
  * Define partitions for flash devices
  */
 
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
        { .name         = "Kernel",
          .offset       = 0,
          .size         = 3 * SZ_1M + SZ_512K },
index f25eca79f4e56d096ed749918752e8aca26ee68d..90a71a56bc230f9cc3324d2351f9d6bfc9667254 100644 (file)
@@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
                nc->op.addrs[nc->op.naddrs++] = page;
                nc->op.addrs[nc->op.naddrs++] = page >> 8;
 
-               if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
-                   (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
+               if (chip->options & NAND_ROW_ADDR_3)
                        nc->op.addrs[nc->op.naddrs++] = page >> 16;
        }
 }
@@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
        struct atmel_nand_controller *nc = dev_get_drvdata(dev);
        struct atmel_nand *nand;
 
+       if (nc->pmecc)
+               atmel_pmecc_reset(nc->pmecc);
+
        list_for_each_entry(nand, &nc->chips, node) {
                int i;
 
@@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = {
        .driver = {
                .name = "atmel-nand-controller",
                .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
+               .pm = &atmel_nand_controller_pm_ops,
        },
        .probe = atmel_nand_controller_probe,
        .remove = atmel_nand_controller_remove,
index 8268636675efc8b3d81959f0f1911a145c42fcc1..fcbe4fd6e684bcc45721d52b4845e4018cb8edeb 100644 (file)
@@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
 }
 EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
 
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
+{
+       writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
+       writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
+
 int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
 {
        struct atmel_pmecc *pmecc = user->pmecc;
@@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
 
 void atmel_pmecc_disable(struct atmel_pmecc_user *user)
 {
-       struct atmel_pmecc *pmecc = user->pmecc;
-
-       writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
-       writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+       atmel_pmecc_reset(user->pmecc);
        mutex_unlock(&user->pmecc->lock);
 }
 EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
@@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
 
        /* Disable all interrupts before registering the PMECC handler. */
        writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
-
-       /* Reset the ECC engine */
-       writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
-       writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+       atmel_pmecc_reset(pmecc);
 
        return pmecc;
 }
index a8ddbfca2ea50f5e7fe3dd80e4ab18c65588647b..817e0dd9fd15771231d9472764c1a1ae160c88c8 100644 (file)
@@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
                        struct atmel_pmecc_user_req *req);
 void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
 
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
 int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
 void atmel_pmecc_disable(struct atmel_pmecc_user *user);
 int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
index 9d4a28fa6b73b2aaf17591ba57d5cf74b67d1aa7..8ab827edf94e2fbc97885032da26105eaeb2bdea 100644 (file)
@@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
 
                        ctx->write_byte(mtd, (u8)(page_addr >> 8));
 
-                       /* One more address cycle for devices > 32MiB */
-                       if (this->chipsize > (32 << 20))
+                       if (this->options & NAND_ROW_ADDR_3)
                                ctx->write_byte(mtd,
                                                ((page_addr >> 16) & 0x0f));
                }
index 1fc435f994e1eca9738b6d28c1b5adb8deb367d5..b01c9804590e5d484675a71bc684e99b51929c41 100644 (file)
@@ -42,7 +42,7 @@ static void __iomem *cmx270_nand_io;
 /*
  * Define static partitions for flash device
  */
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
        [0] = {
                .name   = "cmx270-0",
                .offset = 0,
index 3087b0ba7b7f3d708f92cf58ea838bb7685d3edb..5124f8ae8c04032e0e301931554aea3c80c0aee2 100644 (file)
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
-#include <linux/interrupt.h>
-#include <linux/delay.h>
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
 #include <linux/dma-mapping.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include "denali.h"
 
@@ -31,9 +29,9 @@ MODULE_LICENSE("GPL");
 
 #define DENALI_NAND_NAME    "denali-nand"
 
-/* Host Data/Command Interface */
-#define DENALI_HOST_ADDR       0x00
-#define DENALI_HOST_DATA       0x10
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL    0x00
+#define DENALI_INDEXED_DATA    0x10
 
 #define DENALI_MAP00           (0 << 26)       /* direct access to buffer */
 #define DENALI_MAP01           (1 << 26)       /* read/write pages in PIO */
@@ -61,31 +59,55 @@ MODULE_LICENSE("GPL");
  */
 #define DENALI_CLK_X_MULT      6
 
-/*
- * this macro allows us to convert from an MTD structure to our own
- * device context (denali) structure.
- */
 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
 {
        return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
 }
 
-static void denali_host_write(struct denali_nand_info *denali,
-                             uint32_t addr, uint32_t data)
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address).  The slave data is the actual data to
+ * be transferred.  This mode requires 28 bits of address region allocated.
+ */
+static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+{
+       return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+                               u32 data)
 {
-       iowrite32(addr, denali->host + DENALI_HOST_ADDR);
-       iowrite32(data, denali->host + DENALI_HOST_DATA);
+       iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - address translation module intervenes in passing the
+ * control information.  This mode reduces the required address range.  The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+{
+       iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+       return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+                                u32 data)
+{
+       iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+       iowrite32(data, denali->host + DENALI_INDEXED_DATA);
 }
 
 /*
  * Use the configuration feature register to determine the maximum number of
  * banks that the hardware supports.
  */
-static void detect_max_banks(struct denali_nand_info *denali)
+static void denali_detect_max_banks(struct denali_nand_info *denali)
 {
        uint32_t features = ioread32(denali->reg + FEATURES);
 
-       denali->max_banks = 1 << (features & FEATURES__N_BANKS);
+       denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
 
        /* the encoding changed from rev 5.0 to 5.1 */
        if (denali->revision < 0x0501)
@@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
                                                msecs_to_jiffies(1000));
        if (!time_left) {
                dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
-                       denali->irq_mask);
+                       irq_mask);
                return 0;
        }
 
@@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
        return irq_status;
 }
 
-/*
- * This helper function setups the registers for ECC and whether or not
- * the spare area will be transferred.
- */
-static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
-                               bool transfer_spare)
-{
-       int ecc_en_flag, transfer_spare_flag;
-
-       /* set ECC, transfer spare bits if needed */
-       ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
-       transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
-
-       /* Enable spare area/ECC per user's request. */
-       iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
-       iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
-}
-
 static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 {
        struct denali_nand_info *denali = mtd_to_denali(mtd);
+       u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
        int i;
 
-       iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
-                 denali->host + DENALI_HOST_ADDR);
-
        for (i = 0; i < len; i++)
-               buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
+               buf[i] = denali->host_read(denali, addr);
 }
 
 static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 {
        struct denali_nand_info *denali = mtd_to_denali(mtd);
+       u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
        int i;
 
-       iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
-                 denali->host + DENALI_HOST_ADDR);
-
        for (i = 0; i < len; i++)
-               iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
+               denali->host_write(denali, addr, buf[i]);
 }
 
 static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
 {
        struct denali_nand_info *denali = mtd_to_denali(mtd);
+       u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
        uint16_t *buf16 = (uint16_t *)buf;
        int i;
 
-       iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
-                 denali->host + DENALI_HOST_ADDR);
-
        for (i = 0; i < len / 2; i++)
-               buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
+               buf16[i] = denali->host_read(denali, addr);
 }
 
 static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
                               int len)
 {
        struct denali_nand_info *denali = mtd_to_denali(mtd);
+       u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
        const uint16_t *buf16 = (const uint16_t *)buf;
        int i;
 
-       iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
-                 denali->host + DENALI_HOST_ADDR);
-
        for (i = 0; i < len / 2; i++)
-               iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
+               denali->host_write(denali, addr, buf16[i]);
 }
 
 static uint8_t denali_read_byte(struct mtd_info *mtd)
@@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
        if (ctrl & NAND_CTRL_CHANGE)
                denali_reset_irq(denali);
 
-       denali_host_write(denali, DENALI_BANK(denali) | type, dat);
+       denali->host_write(denali, DENALI_BANK(denali) | type, dat);
 }
 
 static int denali_dev_ready(struct mtd_info *mtd)
@@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
                return 0;
        }
 
-       max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
+       max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
 
        /*
         * The register holds the maximum of per-sector corrected bitflips.
@@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
        return max_bitflips;
 }
 
-#define ECC_SECTOR(x)  (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
-#define ECC_BYTE(x)    (((x) & ECC_ERROR_ADDRESS__OFFSET))
-#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
-#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
-#define ECC_ERR_DEVICE(x)      (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
-#define ECC_LAST_ERR(x)                ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
-
 static int denali_sw_ecc_fixup(struct mtd_info *mtd,
                               struct denali_nand_info *denali,
                               unsigned long *uncor_ecc_flags, uint8_t *buf)
@@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
 
        do {
                err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
-               err_sector = ECC_SECTOR(err_addr);
-               err_byte = ECC_BYTE(err_addr);
+               err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+               err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
 
                err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
-               err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
-               err_device = ECC_ERR_DEVICE(err_cor_info);
+               err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+                                         err_cor_info);
+               err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+                                      err_cor_info);
 
                /* reset the bitflip counter when crossing ECC sector */
                if (err_sector != prev_sector)
                        bitflips = 0;
 
-               if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
+               if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
                        /*
                         * Check later if this is a real ECC error, or
                         * an erased sector.
@@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
                }
 
                prev_sector = err_sector;
-       } while (!ECC_LAST_ERR(err_cor_info));
+       } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
 
        /*
-        * Once handle all ecc errors, controller will trigger a
-        * ECC_TRANSACTION_DONE interrupt, so here just wait for
-        * a while for this interrupt
+        * Once all ECC errors have been handled, the controller will trigger
+        * an ECC_TRANSACTION_DONE interrupt.
         */
        irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
        if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
@@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
        return max_bitflips;
 }
 
-/* programs the controller to either enable/disable DMA transfers */
-static void denali_enable_dma(struct denali_nand_info *denali, bool en)
-{
-       iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
-       ioread32(denali->reg + DMA_ENABLE);
-}
-
 static void denali_setup_dma64(struct denali_nand_info *denali,
                               dma_addr_t dma_addr, int page, int write)
 {
@@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
         * 1. setup transfer type, interrupt when complete,
         *    burst len = 64 bytes, the number of pages
         */
-       denali_host_write(denali, mode,
-                         0x01002000 | (64 << 16) | (write << 8) | page_count);
+       denali->host_write(denali, mode,
+                          0x01002000 | (64 << 16) | (write << 8) | page_count);
 
        /* 2. set memory low address */
-       denali_host_write(denali, mode, dma_addr);
+       denali->host_write(denali, mode, lower_32_bits(dma_addr));
 
        /* 3. set memory high address */
-       denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
+       denali->host_write(denali, mode, upper_32_bits(dma_addr));
 }
 
 static void denali_setup_dma32(struct denali_nand_info *denali,
@@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
        /* DMA is a four step process */
 
        /* 1. setup transfer type and # of pages */
-       denali_host_write(denali, mode | page,
-                         0x2000 | (write << 8) | page_count);
+       denali->host_write(denali, mode | page,
+                          0x2000 | (write << 8) | page_count);
 
        /* 2. set memory high address bits 23:8 */
-       denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+       denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
 
        /* 3. set memory low address bits 23:8 */
-       denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+       denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
 
        /* 4. interrupt when complete, burst len = 64 bytes */
-       denali_host_write(denali, mode | 0x14000, 0x2400);
-}
-
-static void denali_setup_dma(struct denali_nand_info *denali,
-                            dma_addr_t dma_addr, int page, int write)
-{
-       if (denali->caps & DENALI_CAP_DMA_64BIT)
-               denali_setup_dma64(denali, dma_addr, page, write);
-       else
-               denali_setup_dma32(denali, dma_addr, page, write);
+       denali->host_write(denali, mode | 0x14000, 0x2400);
 }
 
 static int denali_pio_read(struct denali_nand_info *denali, void *buf,
                           size_t size, int page, int raw)
 {
-       uint32_t addr = DENALI_BANK(denali) | page;
+       u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
        uint32_t *buf32 = (uint32_t *)buf;
        uint32_t irq_status, ecc_err_mask;
        int i;
@@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
 
        denali_reset_irq(denali);
 
-       iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
        for (i = 0; i < size / 4; i++)
-               *buf32++ = ioread32(denali->host + DENALI_HOST_DATA);
+               *buf32++ = denali->host_read(denali, addr);
 
        irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
        if (!(irq_status & INTR__PAGE_XFER_INC))
@@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
 static int denali_pio_write(struct denali_nand_info *denali,
                            const void *buf, size_t size, int page, int raw)
 {
-       uint32_t addr = DENALI_BANK(denali) | page;
+       u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
        const uint32_t *buf32 = (uint32_t *)buf;
        uint32_t irq_status;
        int i;
 
        denali_reset_irq(denali);
 
-       iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
        for (i = 0; i < size / 4; i++)
-               iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
+               denali->host_write(denali, addr, *buf32++);
 
        irq_status = denali_wait_for_irq(denali,
                                INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
@@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
                ecc_err_mask = INTR__ECC_ERR;
        }
 
-       denali_enable_dma(denali, true);
+       iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
 
        denali_reset_irq(denali);
-       denali_setup_dma(denali, dma_addr, page, write);
+       denali->setup_dma(denali, dma_addr, page, write);
 
-       /* wait for operation to complete */
        irq_status = denali_wait_for_irq(denali, irq_mask);
        if (!(irq_status & INTR__DMA_CMD_COMP))
                ret = -EIO;
        else if (irq_status & ecc_err_mask)
                ret = -EBADMSG;
 
-       denali_enable_dma(denali, false);
+       iowrite32(0, denali->reg + DMA_ENABLE);
+
        dma_unmap_single(denali->dev, dma_addr, size, dir);
 
        if (irq_status & INTR__ERASED_PAGE)
@@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
 static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
                            size_t size, int page, int raw, int write)
 {
-       setup_ecc_for_xfer(denali, !raw, raw);
+       iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+       iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
+                 denali->reg + TRANSFER_SPARE_REG);
 
        if (denali->dma_avail)
                return denali_dma_xfer(denali, buf, size, page, raw, write);
@@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page)
 
        denali_reset_irq(denali);
 
-       denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
-                         DENALI_ERASE);
+       denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
+                          DENALI_ERASE);
 
        /* wait for erase to complete or failure to occur */
        irq_status = denali_wait_for_irq(denali,
@@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
 
        tmp = ioread32(denali->reg + ACC_CLKS);
        tmp &= ~ACC_CLKS__VALUE;
-       tmp |= acc_clks;
+       tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
        iowrite32(tmp, denali->reg + ACC_CLKS);
 
        /* tRWH -> RE_2_WE */
@@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
 
        tmp = ioread32(denali->reg + RE_2_WE);
        tmp &= ~RE_2_WE__VALUE;
-       tmp |= re_2_we;
+       tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
        iowrite32(tmp, denali->reg + RE_2_WE);
 
        /* tRHZ -> RE_2_RE */
@@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
 
        tmp = ioread32(denali->reg + RE_2_RE);
        tmp &= ~RE_2_RE__VALUE;
-       tmp |= re_2_re;
+       tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
        iowrite32(tmp, denali->reg + RE_2_RE);
 
-       /* tWHR -> WE_2_RE */
-       we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
+       /*
+        * tCCS, tWHR -> WE_2_RE
+        *
+        * With WE_2_RE properly set, the Denali controller automatically takes
+        * care of the delay; the driver need not set NAND_WAIT_TCCS.
+        */
+       we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
+                              t_clk);
        we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
 
        tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
        tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
-       tmp |= we_2_re;
+       tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
        iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
 
        /* tADL -> ADDR_2_DATA */
@@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
        addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
 
        tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
-       tmp &= ~addr_2_data_mask;
-       tmp |= addr_2_data;
+       tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+       tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
        iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
 
        /* tREH, tWH -> RDWR_EN_HI_CNT */
@@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
 
        tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
        tmp &= ~RDWR_EN_HI_CNT__VALUE;
-       tmp |= rdwr_en_hi;
+       tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
        iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
 
        /* tRP, tWP -> RDWR_EN_LO_CNT */
@@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
 
        tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
        tmp &= ~RDWR_EN_LO_CNT__VALUE;
-       tmp |= rdwr_en_lo;
+       tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
        iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
 
        /* tCS, tCEA -> CS_SETUP_CNT */
@@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
 
        tmp = ioread32(denali->reg + CS_SETUP_CNT);
        tmp &= ~CS_SETUP_CNT__VALUE;
-       tmp |= cs_setup;
+       tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
        iowrite32(tmp, denali->reg + CS_SETUP_CNT);
 
        return 0;
@@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali)
         * if this value is 0, just let it be.
         */
        denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
-       detect_max_banks(denali);
+       denali_detect_max_banks(denali);
        iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
        iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
 
        iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
-
-       /* Should set value for these registers when init */
-       iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
-       iowrite32(1, denali->reg + ECC_ENABLE);
 }
 
 int denali_calc_ecc_bytes(int step_size, int strength)
@@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
        .free = denali_ooblayout_free,
 };
 
-/* initialize driver data structures */
-static void denali_drv_init(struct denali_nand_info *denali)
-{
-       /*
-        * the completion object will be used to notify
-        * the callee that the interrupt is done
-        */
-       init_completion(&denali->complete);
-
-       /*
-        * the spinlock will be used to synchronize the ISR with any
-        * element that might be access shared data (interrupt status)
-        */
-       spin_lock_init(&denali->irq_lock);
-}
-
 static int denali_multidev_fixup(struct denali_nand_info *denali)
 {
        struct nand_chip *chip = &denali->nand;
@@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali)
 {
        struct nand_chip *chip = &denali->nand;
        struct mtd_info *mtd = nand_to_mtd(chip);
+       u32 features = ioread32(denali->reg + FEATURES);
        int ret;
 
        mtd->dev.parent = denali->dev;
        denali_hw_init(denali);
-       denali_drv_init(denali);
+
+       init_completion(&denali->complete);
+       spin_lock_init(&denali->irq_lock);
 
        denali_clear_irq_all(denali);
 
-       /* Request IRQ after all the hardware initialization is finished */
        ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
                               IRQF_SHARED, DENALI_NAND_NAME, denali);
        if (ret) {
@@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali)
        if (!mtd->name)
                mtd->name = "denali-nand";
 
-       /* register the driver with the NAND core subsystem */
        chip->select_chip = denali_select_chip;
        chip->read_byte = denali_read_byte;
        chip->write_byte = denali_write_byte;
@@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali)
        chip->dev_ready = denali_dev_ready;
        chip->waitfunc = denali_waitfunc;
 
+       if (features & FEATURES__INDEX_ADDR) {
+               denali->host_read = denali_indexed_read;
+               denali->host_write = denali_indexed_write;
+       } else {
+               denali->host_read = denali_direct_read;
+               denali->host_write = denali_direct_write;
+       }
+
        /* clk rate info is needed for setup_data_interface */
        if (denali->clk_x_rate)
                chip->setup_data_interface = denali_setup_data_interface;
 
-       /*
-        * scan for NAND devices attached to the controller
-        * this is the first stage in a two step process to register
-        * with the nand subsystem
-        */
        ret = nand_scan_ident(mtd, denali->max_banks, NULL);
        if (ret)
                goto disable_irq;
@@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali)
        if (denali->dma_avail) {
                chip->options |= NAND_USE_BOUNCE_BUFFER;
                chip->buf_align = 16;
+               if (denali->caps & DENALI_CAP_DMA_64BIT)
+                       denali->setup_dma = denali_setup_dma64;
+               else
+                       denali->setup_dma = denali_setup_dma32;
        }
 
-       /*
-        * second stage of the NAND scan
-        * this stage requires information regarding ECC and
-        * bad block management.
-        */
-
        chip->bbt_options |= NAND_BBT_USE_FLASH;
        chip->bbt_options |= NAND_BBT_NO_OOB;
-
        chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-
-       /* no subpage writes on denali */
        chip->options |= NAND_NO_SUBPAGE_WRITE;
 
        ret = denali_ecc_setup(mtd, chip, denali);
@@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali)
                "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
                chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
 
-       iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
+       iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+                 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
                  denali->reg + ECC_CORRECTION);
        iowrite32(mtd->erasesize / mtd->writesize,
                  denali->reg + PAGES_PER_BLOCK);
        iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
                  denali->reg + DEVICE_WIDTH);
+       iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+                 denali->reg + TWO_ROW_ADDR_CYCLES);
        iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
        iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
 
@@ -1441,7 +1403,6 @@ disable_irq:
 }
 EXPORT_SYMBOL(denali_init);
 
-/* driver exit point */
 void denali_remove(struct denali_nand_info *denali)
 {
        struct mtd_info *mtd = nand_to_mtd(&denali->nand);
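The conversion above replaces the driver's hand-rolled shift-and-mask macros with the <linux/bitfield.h> helpers. A small, self-contained sketch of how GENMASK(), FIELD_GET() and FIELD_PREP() relate; the register name and field are made up for illustration:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define EXAMPLE_REG__COUNT	GENMASK(15, 12)	/* a 4-bit field, bits 15..12 */

	static u32 example_bump_count(u32 reg)
	{
		/* FIELD_GET() masks and shifts down: (reg & GENMASK(15, 12)) >> 12 */
		u32 count = FIELD_GET(EXAMPLE_REG__COUNT, reg);

		/* FIELD_PREP() shifts the new value back into the field's position */
		reg &= ~EXAMPLE_REG__COUNT;
		reg |= FIELD_PREP(EXAMPLE_REG__COUNT, count + 1);

		return reg;
	}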
index 9239e6793e6eade20a38287cfb26c399f09ac2ac..2911066dacace199615c9e673d3dfcab830bfe1a 100644 (file)
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
 
 #ifndef __DENALI_H__
 #define __DENALI_H__
 
 #include <linux/bitops.h>
+#include <linux/completion.h>
 #include <linux/mtd/rawnand.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
 
 #define DEVICE_RESET                           0x0
 #define     DEVICE_RESET__BANK(bank)                   BIT(bank)
 #define ECC_CORRECTION                         0x1b0
 #define     ECC_CORRECTION__VALUE                      GENMASK(4, 0)
 #define     ECC_CORRECTION__ERASE_THRESHOLD            GENMASK(31, 16)
-#define     MAKE_ECC_CORRECTION(val, thresh)           \
-                       (((val) & (ECC_CORRECTION__VALUE)) | \
-                       (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
 
 #define READ_MODE                              0x1c0
 #define     READ_MODE__VALUE                           GENMASK(3, 0)
 
 #define ECC_ERROR_ADDRESS                      0x630
 #define     ECC_ERROR_ADDRESS__OFFSET                  GENMASK(11, 0)
-#define     ECC_ERROR_ADDRESS__SECTOR_NR               GENMASK(15, 12)
+#define     ECC_ERROR_ADDRESS__SECTOR                  GENMASK(15, 12)
 
 #define ERR_CORRECTION_INFO                    0x640
-#define     ERR_CORRECTION_INFO__BYTEMASK              GENMASK(7, 0)
-#define     ERR_CORRECTION_INFO__DEVICE_NR             GENMASK(11, 8)
-#define     ERR_CORRECTION_INFO__ERROR_TYPE            BIT(14)
-#define     ERR_CORRECTION_INFO__LAST_ERR_INFO         BIT(15)
+#define     ERR_CORRECTION_INFO__BYTE                  GENMASK(7, 0)
+#define     ERR_CORRECTION_INFO__DEVICE                        GENMASK(11, 8)
+#define     ERR_CORRECTION_INFO__UNCOR                 BIT(14)
+#define     ERR_CORRECTION_INFO__LAST_ERR              BIT(15)
 
 #define ECC_COR_INFO(bank)                     (0x650 + (bank) / 2 * 0x10)
 #define     ECC_COR_INFO__SHIFT(bank)                  ((bank) % 2 * 8)
@@ -310,23 +305,24 @@ struct denali_nand_info {
        struct device *dev;
        void __iomem *reg;              /* Register Interface */
        void __iomem *host;             /* Host Data/Command Interface */
-
-       /* elements used by ISR */
        struct completion complete;
-       spinlock_t irq_lock;
-       uint32_t irq_mask;
-       uint32_t irq_status;
+       spinlock_t irq_lock;            /* protect irq_mask and irq_status */
+       u32 irq_mask;                   /* interrupts we are waiting for */
+       u32 irq_status;                 /* interrupts that have happened */
        int irq;
-
-       void *buf;
+       void *buf;                      /* for syndrome layout conversion */
        dma_addr_t dma_addr;
-       int dma_avail;
+       int dma_avail;                  /* can support DMA? */
        int devs_per_cs;                /* devices connected in parallel */
-       int oob_skip_bytes;
+       int oob_skip_bytes;             /* number of bytes reserved for BBM */
        int max_banks;
-       unsigned int revision;
-       unsigned int caps;
+       unsigned int revision;          /* IP revision */
+       unsigned int caps;              /* IP capability (or quirk) */
        const struct nand_ecc_caps *ecc_caps;
+       u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
+       void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
+       void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
+                         int page, int write);
 };
 
 #define DENALI_CAP_HW_ECC_FIXUP                        BIT(0)
index 56e2e177644d6c3d9b0301b50ed780c51509a9bb..cfd33e6ca77f903a6afc636e73f31ffb40d0d0bd 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  */
+
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
 
 #include "denali.h"
 
@@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = {
                .of_match_table = denali_nand_dt_ids,
        },
 };
-
 module_platform_driver(denali_dt_driver);
 
 MODULE_LICENSE("GPL");
index 81370c79aa48aa4fe6ef3d4d65bb7dd3c2a91db7..57fb7ae314126ca567fe4ddea476c885c17e5105 100644 (file)
@@ -11,6 +11,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  */
+
+#include <linux/errno.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -106,7 +109,6 @@ failed_remap_reg:
        return ret;
 }
 
-/* driver exit point */
 static void denali_pci_remove(struct pci_dev *dev)
 {
        struct denali_nand_info *denali = pci_get_drvdata(dev);
@@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = {
        .probe = denali_pci_probe,
        .remove = denali_pci_remove,
 };
-
 module_pci_driver(denali_pci_driver);
index c3aa53caab5cfe539def22e4dc3302afd6a0b67f..72671dc52e2e705ed8d7ee002ce3dba91124995f 100644 (file)
@@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
                if (page_addr != -1) {
                        WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
                        WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
-                       /* One more address cycle for higher density devices */
-                       if (this->chipsize & 0x0c000000) {
+                       if (this->options & NAND_ROW_ADDR_3) {
                                WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
                                printk("high density\n");
                        }
index fd3648952b5a70281ce9e7ac596facdfe4ddfd04..484f7fbc3f7d2d11cd66fc3416e64ab38d47f852 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/io.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/nand-gpio.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_gpio.h>
 
 struct gpiomtd {
        void __iomem            *io_sync;
        struct nand_chip        nand_chip;
        struct gpio_nand_platdata plat;
+       struct gpio_desc *nce; /* Optional chip enable */
+       struct gpio_desc *cle;
+       struct gpio_desc *ale;
+       struct gpio_desc *rdy;
+       struct gpio_desc *nwp; /* Optional write protection */
 };
 
 static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
@@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
        gpio_nand_dosync(gpiomtd);
 
        if (ctrl & NAND_CTRL_CHANGE) {
-               if (gpio_is_valid(gpiomtd->plat.gpio_nce))
-                       gpio_set_value(gpiomtd->plat.gpio_nce,
-                                      !(ctrl & NAND_NCE));
-               gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
-               gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+               if (gpiomtd->nce)
+                       gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
+               gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
+               gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
                gpio_nand_dosync(gpiomtd);
        }
        if (cmd == NAND_CMD_NONE)
@@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd)
 {
        struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
 
-       return gpio_get_value(gpiomtd->plat.gpio_rdy);
+       return gpiod_get_value(gpiomtd->rdy);
 }
 
 #ifdef CONFIG_OF
@@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev,
                }
        }
 
-       plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
-       plat->gpio_nce = of_get_gpio(dev->of_node, 1);
-       plat->gpio_ale = of_get_gpio(dev->of_node, 2);
-       plat->gpio_cle = of_get_gpio(dev->of_node, 3);
-       plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
-
        if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
                plat->chip_delay = val;
 
@@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev)
 
        nand_release(nand_to_mtd(&gpiomtd->nand_chip));
 
-       if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
-               gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
-       if (gpio_is_valid(gpiomtd->plat.gpio_nce))
-               gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+       /* Enable write protection and disable the chip */
+       if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+               gpiod_set_value(gpiomtd->nwp, 0);
+       if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+               gpiod_set_value(gpiomtd->nce, 0);
 
        return 0;
 }
@@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev)
        struct nand_chip *chip;
        struct mtd_info *mtd;
        struct resource *res;
+       struct device *dev = &pdev->dev;
        int ret = 0;
 
-       if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
+       if (!dev->of_node && !dev_get_platdata(dev))
                return -EINVAL;
 
-       gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+       gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
        if (!gpiomtd)
                return -ENOMEM;
 
        chip = &gpiomtd->nand_chip;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+       chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
        if (IS_ERR(chip->IO_ADDR_R))
                return PTR_ERR(chip->IO_ADDR_R);
 
        res = gpio_nand_get_io_sync(pdev);
        if (res) {
-               gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+               gpiomtd->io_sync = devm_ioremap_resource(dev, res);
                if (IS_ERR(gpiomtd->io_sync))
                        return PTR_ERR(gpiomtd->io_sync);
        }
 
-       ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+       ret = gpio_nand_get_config(dev, &gpiomtd->plat);
        if (ret)
                return ret;
 
-       if (gpio_is_valid(gpiomtd->plat.gpio_nce)) {
-               ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce,
-                                       "NAND NCE");
-               if (ret)
-                       return ret;
-               gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+       /* Just enable the chip */
+       gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
+       if (IS_ERR(gpiomtd->nce))
+               return PTR_ERR(gpiomtd->nce);
+
+       /* We disable write protection once we know probe() will succeed */
+       gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiomtd->nwp)) {
+               ret = PTR_ERR(gpiomtd->nwp);
+               goto out_ce;
        }
 
-       if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
-               ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
-                                       "NAND NWP");
-               if (ret)
-                       return ret;
+       gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiomtd->ale)) {
+               ret = PTR_ERR(gpiomtd->ale);
+               goto out_ce;
        }
 
-       ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
-       if (ret)
-               return ret;
-       gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+       gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiomtd->cle)) {
+               ret = PTR_ERR(gpiomtd->cle);
+               goto out_ce;
+       }
 
-       ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
-       if (ret)
-               return ret;
-       gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
-
-       if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
-               ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
-                                       "NAND RDY");
-               if (ret)
-                       return ret;
-               gpio_direction_input(gpiomtd->plat.gpio_rdy);
-               chip->dev_ready = gpio_nand_devready;
+       gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
+       if (IS_ERR(gpiomtd->rdy)) {
+               ret = PTR_ERR(gpiomtd->rdy);
+               goto out_ce;
        }
+       /* Using RDY pin */
+       if (gpiomtd->rdy)
+               chip->dev_ready = gpio_nand_devready;
 
        nand_set_flash_node(chip, pdev->dev.of_node);
        chip->IO_ADDR_W         = chip->IO_ADDR_R;
@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
        chip->cmd_ctrl          = gpio_nand_cmd_ctrl;
 
        mtd                     = nand_to_mtd(chip);
-       mtd->dev.parent         = &pdev->dev;
+       mtd->dev.parent         = dev;
 
        platform_set_drvdata(pdev, gpiomtd);
 
-       if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
-               gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+       /* Disable write protection, if wired up */
+       if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+               gpiod_direction_output(gpiomtd->nwp, 1);
 
        ret = nand_scan(mtd, 1);
        if (ret)
@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev)
                return 0;
 
 err_wp:
-       if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
-               gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+       if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+               gpiod_set_value(gpiomtd->nwp, 0);
+out_ce:
+       if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+               gpiod_set_value(gpiomtd->nce, 0);
 
        return ret;
 }
index d9ee1a7e695636b21f13236002903ae7dfe0249a..0897261c3e1702fe69239eb3ef3cf7bc1bc9e1e6 100644 (file)
@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
                host->addr_value[0] |= (page_addr & 0xffff)
                        << (host->addr_cycle * 8);
                host->addr_cycle    += 2;
-               /* One more address cycle for devices > 128MiB */
-               if (chip->chipsize > (128 << 20)) {
+               if (chip->options & NAND_ROW_ADDR_3) {
                        host->addr_cycle += 1;
                        if (host->command == NAND_CMD_ERASE1)
                                host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
index 7f3b065b6b8fe7bfcc5e5c20a55262cb8d4a95c9..c51d214d169ea436273435f99965031e3ad70348 100644 (file)
@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
                op = ECC_DECODE;
                dec = readw(ecc->regs + ECC_DECDONE);
                if (dec & ecc->sectors) {
+                       /*
+                        * Clear decode IRQ status once again to ensure that
+                        * there will be no extra IRQ.
+                        */
+                       readw(ecc->regs + ECC_DECIRQ_STA);
                        ecc->sectors = 0;
                        complete(&ecc->done);
                } else {
@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
                }
        }
 
-       writel(0, ecc->regs + ECC_IRQ_REG(op));
-
        return IRQ_HANDLED;
 }
 
@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
 
        /* disable it */
        mtk_ecc_wait_idle(ecc, op);
+       if (op == ECC_DECODE)
+               /*
+                * Clear decode IRQ status in case waiting for the decode IRQ
+                * timed out.
+                */
+               readw(ecc->regs + ECC_DECIRQ_STA);
        writew(0, ecc->regs + ECC_IRQ_REG(op));
        writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
 
index 53e5e0337c3e206dc6f791170360b44333504ac5..f3be0b2a88692b96589023b825fe6a1a1b9b566e 100644 (file)
@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
  * waits for completion. */
 static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
 {
-       pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+       dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
 
        writew(cmd, NFC_V1_V2_FLASH_CMD);
        writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
                        udelay(1);
                }
                if (max_retries < 0)
-                       pr_debug("%s: RESET failed\n", __func__);
+                       dev_dbg(host->dev, "%s: RESET failed\n", __func__);
        } else {
                /* Wait for operation to complete */
                wait_op_done(host, useirq);
@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
  * a NAND command. */
 static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
 {
-       pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+       dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
 
        writew(addr, NFC_V1_V2_FLASH_ADDR);
        writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
        uint16_t ecc_status = get_ecc_status_v1(host);
 
        if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
-               pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+               dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n");
                return -EBADMSG;
        }
 
@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
        do {
                err = ecc_stat & ecc_bit_mask;
                if (err > err_limit) {
-                       printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+                       dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n");
                        return -EBADMSG;
                } else {
                        ret += err;
@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
                ecc_stat >>= 4;
        } while (--no_subpages);
 
-       pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+       dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret);
 
        return ret;
 }
@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
                host->buf_start++;
        }
 
-       pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+       dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
        return ret;
 }
 
@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
                                host->devtype_data->send_addr(host,
                                                (page_addr >> 8) & 0xff, true);
                } else {
-                       /* One more address cycle for higher density devices */
-                       if (mtd->size >= 0x4000000) {
+                       if (nand_chip->options & NAND_ROW_ADDR_3) {
                                /* paddr_8 - paddr_15 */
                                host->devtype_data->send_addr(host,
                                                (page_addr >> 8) & 0xff,
@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
 
-       pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+       dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
              command, column, page_addr);
 
        /* Reset command state information */
index 12edaae17d81f2228eefcc50b9d5de7433119987..6135d007a0686193d557ce831553ed53c9dd58c6 100644 (file)
@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
 
-       if (section)
+       if (section || !ecc->total)
                return -ERANGE;
 
        oobregion->length = ecc->total;
@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
                chip->cmd_ctrl(mtd, page_addr, ctrl);
                ctrl &= ~NAND_CTRL_CHANGE;
                chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
-               /* One more address cycle for devices > 32MiB */
-               if (chip->chipsize > (32 << 20))
+               if (chip->options & NAND_ROW_ADDR_3)
                        chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
        }
        chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
                        chip->cmd_ctrl(mtd, page_addr, ctrl);
                        chip->cmd_ctrl(mtd, page_addr >> 8,
                                       NAND_NCE | NAND_ALE);
-                       /* One more address cycle for devices > 128MiB */
-                       if (chip->chipsize > (128 << 20))
+                       if (chip->options & NAND_ROW_ADDR_3)
                                chip->cmd_ctrl(mtd, page_addr >> 16,
                                               NAND_NCE | NAND_ALE);
                }
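
Editor's note: both command helpers now key the third row-address cycle off the new NAND_ROW_ADDR_3 option instead of hard-coded 32 MiB/128 MiB size checks, which only hold for one page size each. The flag is set during identification (see the ident_done hunk below) whenever chip_shift - page_shift, i.e. log2 of the page count, exceeds 16: more than 65536 pages no longer fit in two row-address bytes. A self-contained sketch of that arithmetic, with made-up geometry:

#include <stdio.h>

/* Hypothetical numbers: a 256 MiB die with 2 KiB pages. */
int main(void)
{
	unsigned int chip_shift = 28;	/* log2(256 MiB) */
	unsigned int page_shift = 11;	/* log2(2 KiB)   */
	unsigned int row_bits = chip_shift - page_shift;	/* 17 bits  */
	unsigned int row_cycles = (row_bits + 7) / 8;		/* 3 bytes  */

	printf("%u row-address bits -> %u address cycles (NAND_ROW_ADDR_3: %s)\n",
	       row_bits, row_cycles, row_bits > 16 ? "yes" : "no");
	return 0;
}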
@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nand_reset);
 
 /**
  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
                            size_t *retlen, const uint8_t *buf)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
+       int chipnr = (int)(to >> chip->chip_shift);
        struct mtd_oob_ops ops;
        int ret;
 
-       /* Wait for the device to get ready */
-       panic_nand_wait(mtd, chip, 400);
-
        /* Grab the device */
        panic_nand_get_device(chip, mtd, FL_WRITING);
 
+       chip->select_chip(mtd, chipnr);
+
+       /* Wait for the device to get ready */
+       panic_nand_wait(mtd, chip, 400);
+
        memset(&ops, 0, sizeof(ops));
        ops.len = len;
        ops.datbuf = (uint8_t *)buf;
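
Editor's note: the reordering above matters because panic_nand_wait() polls whichever die is currently selected, so the target die — derived by shifting the byte offset right by chip_shift, log2 of the per-die size — must be selected before waiting. A small user-space sketch of that offset-to-die calculation, using made-up sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int chip_shift = 27;		/* 128 MiB per die (example) */
	uint64_t to = 0x0a000000;		/* 160 MiB target offset     */
	int chipnr = (int)(to >> chip_shift);	/* -> die 1                  */

	printf("offset 0x%llx -> chip %d\n", (unsigned long long)to, chipnr);
	return 0;
}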
@@ -3999,6 +4001,9 @@ ident_done:
                chip->chip_shift += 32 - 1;
        }
 
+       if (chip->chip_shift - chip->page_shift > 16)
+               chip->options |= NAND_ROW_ADDR_3;
+
        chip->badblockbits = 8;
        chip->erase = single_erase;
 
@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd)
                        mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
                        break;
                default:
+                       /*
+                        * Expose the whole OOB area to users if ECC_NONE
+                        * is passed. We could do that for all kind of
+                        * ->oobsize, but we must keep the old large/small
+                        * page with ECC layout when ->oobsize <= 128 for
+                        * compatibility reasons.
+                        */
+                       if (ecc->mode == NAND_ECC_NONE) {
+                               mtd_set_ooblayout(mtd,
+                                               &nand_ooblayout_lp_ops);
+                               break;
+                       }
+
                        WARN(1, "No oob scheme defined for oobsize %d\n",
                                mtd->oobsize);
                        ret = -EINVAL;
index 246b4393118e4df5c645b3e4eb338a70958ba4b9..44322a363ba549dd7120138fb2b0f977a09260f5 100644 (file)
@@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
        struct dentry *root = nsmtd->dbg.dfs_dir;
        struct dentry *dent;
 
-       if (!IS_ENABLED(CONFIG_DEBUG_FS))
+       /*
+        * Just skip debugfs initialization when the debugfs directory is
+        * missing.
+        */
+       if (IS_ERR_OR_NULL(root)) {
+               if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+                   !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+                       NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
                return 0;
-
-       if (IS_ERR_OR_NULL(root))
-               return -1;
+       }
 
        dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
                                   root, dev, &dfs_fops);
index 7bb4d2ea93421a4d880b77bdce8fad99052fc53f..af5b32c9a791dc18934b916cec03573995bf307a 100644 (file)
@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
                if (page_addr != -1) {
                        write_addr_reg(nand, page_addr);
 
-                       if (chip->chipsize > (128 << 20)) {
+                       if (chip->options & NAND_ROW_ADDR_3) {
                                write_addr_reg(nand, page_addr >> 8);
                                write_addr_reg(nand, page_addr >> 16 | ENDADDR);
                        } else {
index 54540c8fa1a28edab3fbaaf648ab5032942bbcae..dad438c4906af205a73f44eade6743cd4460faf9 100644 (file)
@@ -1133,129 +1133,172 @@ static u8  bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
                                0x97, 0x79, 0xe5, 0x24, 0xb5};
 
 /**
- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
  * @mtd:       MTD device structure
  * @dat:       The pointer to data on which ecc is computed
  * @ecc_code:  The ecc_code buffer
+ * @i:         The sector number (for a multi sector page)
  *
- * Support calculating of BCH4/8 ecc vectors for the page
+ * Support calculating of BCH4/8/16 ECC vectors for one sector
+ * within a page. Sector number is in @i.
  */
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
-                                       const u_char *dat, u_char *ecc_calc)
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+                                  const u_char *dat, u_char *ecc_calc, int i)
 {
        struct omap_nand_info *info = mtd_to_omap(mtd);
        int eccbytes    = info->nand.ecc.bytes;
        struct gpmc_nand_regs   *gpmc_regs = &info->reg;
        u8 *ecc_code;
-       unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+       unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
        u32 val;
-       int i, j;
+       int j;
+
+       ecc_code = ecc_calc;
+       switch (info->ecc_opt) {
+       case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+       case OMAP_ECC_BCH8_CODE_HW:
+               bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+               bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+               bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+               bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+               *ecc_code++ = (bch_val4 & 0xFF);
+               *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+               *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+               *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+               *ecc_code++ = (bch_val3 & 0xFF);
+               *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+               *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+               *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+               *ecc_code++ = (bch_val2 & 0xFF);
+               *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+               *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+               *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+               *ecc_code++ = (bch_val1 & 0xFF);
+               break;
+       case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+       case OMAP_ECC_BCH4_CODE_HW:
+               bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+               bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+               *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+               *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+               *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+                       ((bch_val1 >> 28) & 0xF);
+               *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+               *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+               *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+               *ecc_code++ = ((bch_val1 & 0xF) << 4);
+               break;
+       case OMAP_ECC_BCH16_CODE_HW:
+               val = readl(gpmc_regs->gpmc_bch_result6[i]);
+               ecc_code[0]  = ((val >>  8) & 0xFF);
+               ecc_code[1]  = ((val >>  0) & 0xFF);
+               val = readl(gpmc_regs->gpmc_bch_result5[i]);
+               ecc_code[2]  = ((val >> 24) & 0xFF);
+               ecc_code[3]  = ((val >> 16) & 0xFF);
+               ecc_code[4]  = ((val >>  8) & 0xFF);
+               ecc_code[5]  = ((val >>  0) & 0xFF);
+               val = readl(gpmc_regs->gpmc_bch_result4[i]);
+               ecc_code[6]  = ((val >> 24) & 0xFF);
+               ecc_code[7]  = ((val >> 16) & 0xFF);
+               ecc_code[8]  = ((val >>  8) & 0xFF);
+               ecc_code[9]  = ((val >>  0) & 0xFF);
+               val = readl(gpmc_regs->gpmc_bch_result3[i]);
+               ecc_code[10] = ((val >> 24) & 0xFF);
+               ecc_code[11] = ((val >> 16) & 0xFF);
+               ecc_code[12] = ((val >>  8) & 0xFF);
+               ecc_code[13] = ((val >>  0) & 0xFF);
+               val = readl(gpmc_regs->gpmc_bch_result2[i]);
+               ecc_code[14] = ((val >> 24) & 0xFF);
+               ecc_code[15] = ((val >> 16) & 0xFF);
+               ecc_code[16] = ((val >>  8) & 0xFF);
+               ecc_code[17] = ((val >>  0) & 0xFF);
+               val = readl(gpmc_regs->gpmc_bch_result1[i]);
+               ecc_code[18] = ((val >> 24) & 0xFF);
+               ecc_code[19] = ((val >> 16) & 0xFF);
+               ecc_code[20] = ((val >>  8) & 0xFF);
+               ecc_code[21] = ((val >>  0) & 0xFF);
+               val = readl(gpmc_regs->gpmc_bch_result0[i]);
+               ecc_code[22] = ((val >> 24) & 0xFF);
+               ecc_code[23] = ((val >> 16) & 0xFF);
+               ecc_code[24] = ((val >>  8) & 0xFF);
+               ecc_code[25] = ((val >>  0) & 0xFF);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* ECC scheme specific syndrome customizations */
+       switch (info->ecc_opt) {
+       case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+               /* Add constant polynomial to remainder, so that
+                * ECC of blank pages results in 0x0 on reading back
+                */
+               for (j = 0; j < eccbytes; j++)
+                       ecc_calc[j] ^= bch4_polynomial[j];
+               break;
+       case OMAP_ECC_BCH4_CODE_HW:
+               /* Set  8th ECC byte as 0x0 for ROM compatibility */
+               ecc_calc[eccbytes - 1] = 0x0;
+               break;
+       case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+               /* Add constant polynomial to remainder, so that
+                * ECC of blank pages results in 0x0 on reading back
+                */
+               for (j = 0; j < eccbytes; j++)
+                       ecc_calc[j] ^= bch8_polynomial[j];
+               break;
+       case OMAP_ECC_BCH8_CODE_HW:
+               /* Set 14th ECC byte as 0x0 for ROM compatibility */
+               ecc_calc[eccbytes - 1] = 0x0;
+               break;
+       case OMAP_ECC_BCH16_CODE_HW:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @mtd:       MTD device structure
+ * @dat:       The pointer to data on which ecc is computed
+ * @ecc_code:  The ecc_code buffer
+ *
+ * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
+ * when SW based correction is required as ECC is required for one sector
+ * at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+                                    const u_char *dat, u_char *ecc_calc)
+{
+       return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd:       MTD device structure
+ * @dat:       The pointer to data on which ecc is computed
+ * @ecc_code:  The ecc_code buffer
+ *
+ * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+                                       const u_char *dat, u_char *ecc_calc)
+{
+       struct omap_nand_info *info = mtd_to_omap(mtd);
+       int eccbytes = info->nand.ecc.bytes;
+       unsigned long nsectors;
+       int i, ret;
 
        nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
        for (i = 0; i < nsectors; i++) {
-               ecc_code = ecc_calc;
-               switch (info->ecc_opt) {
-               case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-               case OMAP_ECC_BCH8_CODE_HW:
-                       bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
-                       bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
-                       bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
-                       bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
-                       *ecc_code++ = (bch_val4 & 0xFF);
-                       *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
-                       *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
-                       *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
-                       *ecc_code++ = (bch_val3 & 0xFF);
-                       *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
-                       *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
-                       *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
-                       *ecc_code++ = (bch_val2 & 0xFF);
-                       *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
-                       *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
-                       *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
-                       *ecc_code++ = (bch_val1 & 0xFF);
-                       break;
-               case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-               case OMAP_ECC_BCH4_CODE_HW:
-                       bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
-                       bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
-                       *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
-                       *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
-                       *ecc_code++ = ((bch_val2 & 0xF) << 4) |
-                               ((bch_val1 >> 28) & 0xF);
-                       *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
-                       *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
-                       *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
-                       *ecc_code++ = ((bch_val1 & 0xF) << 4);
-                       break;
-               case OMAP_ECC_BCH16_CODE_HW:
-                       val = readl(gpmc_regs->gpmc_bch_result6[i]);
-                       ecc_code[0]  = ((val >>  8) & 0xFF);
-                       ecc_code[1]  = ((val >>  0) & 0xFF);
-                       val = readl(gpmc_regs->gpmc_bch_result5[i]);
-                       ecc_code[2]  = ((val >> 24) & 0xFF);
-                       ecc_code[3]  = ((val >> 16) & 0xFF);
-                       ecc_code[4]  = ((val >>  8) & 0xFF);
-                       ecc_code[5]  = ((val >>  0) & 0xFF);
-                       val = readl(gpmc_regs->gpmc_bch_result4[i]);
-                       ecc_code[6]  = ((val >> 24) & 0xFF);
-                       ecc_code[7]  = ((val >> 16) & 0xFF);
-                       ecc_code[8]  = ((val >>  8) & 0xFF);
-                       ecc_code[9]  = ((val >>  0) & 0xFF);
-                       val = readl(gpmc_regs->gpmc_bch_result3[i]);
-                       ecc_code[10] = ((val >> 24) & 0xFF);
-                       ecc_code[11] = ((val >> 16) & 0xFF);
-                       ecc_code[12] = ((val >>  8) & 0xFF);
-                       ecc_code[13] = ((val >>  0) & 0xFF);
-                       val = readl(gpmc_regs->gpmc_bch_result2[i]);
-                       ecc_code[14] = ((val >> 24) & 0xFF);
-                       ecc_code[15] = ((val >> 16) & 0xFF);
-                       ecc_code[16] = ((val >>  8) & 0xFF);
-                       ecc_code[17] = ((val >>  0) & 0xFF);
-                       val = readl(gpmc_regs->gpmc_bch_result1[i]);
-                       ecc_code[18] = ((val >> 24) & 0xFF);
-                       ecc_code[19] = ((val >> 16) & 0xFF);
-                       ecc_code[20] = ((val >>  8) & 0xFF);
-                       ecc_code[21] = ((val >>  0) & 0xFF);
-                       val = readl(gpmc_regs->gpmc_bch_result0[i]);
-                       ecc_code[22] = ((val >> 24) & 0xFF);
-                       ecc_code[23] = ((val >> 16) & 0xFF);
-                       ecc_code[24] = ((val >>  8) & 0xFF);
-                       ecc_code[25] = ((val >>  0) & 0xFF);
-                       break;
-               default:
-                       return -EINVAL;
-               }
-
-               /* ECC scheme specific syndrome customizations */
-               switch (info->ecc_opt) {
-               case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-                       /* Add constant polynomial to remainder, so that
-                        * ECC of blank pages results in 0x0 on reading back */
-                       for (j = 0; j < eccbytes; j++)
-                               ecc_calc[j] ^= bch4_polynomial[j];
-                       break;
-               case OMAP_ECC_BCH4_CODE_HW:
-                       /* Set  8th ECC byte as 0x0 for ROM compatibility */
-                       ecc_calc[eccbytes - 1] = 0x0;
-                       break;
-               case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-                       /* Add constant polynomial to remainder, so that
-                        * ECC of blank pages results in 0x0 on reading back */
-                       for (j = 0; j < eccbytes; j++)
-                               ecc_calc[j] ^= bch8_polynomial[j];
-                       break;
-               case OMAP_ECC_BCH8_CODE_HW:
-                       /* Set 14th ECC byte as 0x0 for ROM compatibility */
-                       ecc_calc[eccbytes - 1] = 0x0;
-                       break;
-               case OMAP_ECC_BCH16_CODE_HW:
-                       break;
-               default:
-                       return -EINVAL;
-               }
+               ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+               if (ret)
+                       return ret;
 
-       ecc_calc += eccbytes;
+               ecc_calc += eccbytes;
        }
 
        return 0;
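
Editor's note: after this refactor, _omap_calculate_ecc_bch() packs the GPMC BCH result registers for a single 512-byte sector, and omap_calculate_ecc_bch_multi() simply loops over the sectors, advancing the output buffer by eccbytes each time. The BCH4 packing can be checked in isolation; the register values in this self-contained sketch are arbitrary test patterns, not hardware output:

#include <stdio.h>
#include <stdint.h>

/* Pack two 32-bit BCH4 result words into the 7 ECC bytes, mirroring the
 * byte/nibble order used by _omap_calculate_ecc_bch() above.
 */
static void pack_bch4(uint32_t bch_val1, uint32_t bch_val2, uint8_t ecc[7])
{
	ecc[0] = (bch_val2 >> 12) & 0xFF;
	ecc[1] = (bch_val2 >> 4) & 0xFF;
	ecc[2] = ((bch_val2 & 0xF) << 4) | ((bch_val1 >> 28) & 0xF);
	ecc[3] = (bch_val1 >> 20) & 0xFF;
	ecc[4] = (bch_val1 >> 12) & 0xFF;
	ecc[5] = (bch_val1 >> 4) & 0xFF;
	ecc[6] = (bch_val1 & 0xF) << 4;
}

int main(void)
{
	uint8_t ecc[7];
	int i;

	pack_bch4(0x12345678, 0x0009abcd, ecc);
	for (i = 0; i < 7; i++)
		printf("%02x ", ecc[i]);
	printf("\n");
	return 0;
}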
@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
        chip->write_buf(mtd, buf, mtd->writesize);
 
        /* Update ecc vector from GPMC result registers */
-       chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+       omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
 
        ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
                                         chip->ecc.total);
@@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
        return 0;
 }
 
+/**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @mtd:       mtd info structure
+ * @chip:      nand chip info structure
+ * @offset:    column address of subpage within the page
+ * @data_len:  data length
+ * @buf:       data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct mtd_info *mtd,
+                                 struct nand_chip *chip, u32 offset,
+                                 u32 data_len, const u8 *buf,
+                                 int oob_required, int page)
+{
+       u8 *ecc_calc = chip->buffers->ecccalc;
+       int ecc_size      = chip->ecc.size;
+       int ecc_bytes     = chip->ecc.bytes;
+       int ecc_steps     = chip->ecc.steps;
+       u32 start_step = offset / ecc_size;
+       u32 end_step   = (offset + data_len - 1) / ecc_size;
+       int step, ret = 0;
+
+       /*
+        * Write entire page at one go as it would be optimal
+        * as ECC is calculated by hardware.
+        * ECC is calculated for all subpages but we choose
+        * only what we want.
+        */
+
+       /* Enable GPMC ECC engine */
+       chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+       /* Write data */
+       chip->write_buf(mtd, buf, mtd->writesize);
+
+       for (step = 0; step < ecc_steps; step++) {
+               /* mask ECC of un-touched subpages by padding 0xFF */
+               if (step < start_step || step > end_step)
+                       memset(ecc_calc, 0xff, ecc_bytes);
+               else
+                       ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+               if (ret)
+                       return ret;
+
+               buf += ecc_size;
+               ecc_calc += ecc_bytes;
+       }
+
+       /* copy calculated ECC for whole page to chip->buffer->oob */
+       /* this include masked-value(0xFF) for unwritten subpages */
+       ecc_calc = chip->buffers->ecccalc;
+       ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+                                        chip->ecc.total);
+       if (ret)
+               return ret;
+
+       /* write OOB buffer to NAND device */
+       chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+       return 0;
+}
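
Editor's note: omap_write_subpage_bch() still programs the whole page — the GPMC engine computes ECC for every step regardless — and then pads the ECC of steps outside the written range with 0xFF so untouched subpages keep an "erased" OOB. A self-contained sketch of the step-window arithmetic, with a hypothetical 2 KiB page and 512-byte ECC steps:

#include <stdio.h>

int main(void)
{
	unsigned int ecc_size = 512, ecc_steps = 4;
	unsigned int offset = 512, data_len = 1024;	/* subpage: steps 1-2 */
	unsigned int start_step = offset / ecc_size;
	unsigned int end_step = (offset + data_len - 1) / ecc_size;
	unsigned int step;

	for (step = 0; step < ecc_steps; step++)
		printf("step %u: %s\n", step,
		       (step < start_step || step > end_step) ?
		       "pad ECC with 0xFF" : "use computed ECC");
	return 0;
}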
+
 /**
  * omap_read_page_bch - BCH ecc based page read function for entire page
  * @mtd:               mtd info structure
@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
                       chip->ecc.total);
 
        /* Calculate ecc bytes */
-       chip->ecc.calculate(mtd, buf, ecc_calc);
+       omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
 
        ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
                                         chip->ecc.total);
@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info,
        return true;
 }
 
-static bool omap2_nand_ecc_check(struct omap_nand_info *info,
-                                struct omap_nand_platform_data *pdata)
+static bool omap2_nand_ecc_check(struct omap_nand_info *info)
 {
        bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
 
@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
 static int omap_nand_probe(struct platform_device *pdev)
 {
        struct omap_nand_info           *info;
-       struct omap_nand_platform_data  *pdata = NULL;
        struct mtd_info                 *mtd;
        struct nand_chip                *nand_chip;
        int                             err;
@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev)
 
        info->pdev = pdev;
 
-       if (dev->of_node) {
-               if (omap_get_dt_info(dev, info))
-                       return -EINVAL;
-       } else {
-               pdata = dev_get_platdata(&pdev->dev);
-               if (!pdata) {
-                       dev_err(&pdev->dev, "platform data missing\n");
-                       return -EINVAL;
-               }
-
-               info->gpmc_cs = pdata->cs;
-               info->reg = pdata->reg;
-               info->ecc_opt = pdata->ecc_opt;
-               if (pdata->dev_ready)
-                       dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
-
-               info->xfer_type = pdata->xfer_type;
-               info->devsize = pdata->devsize;
-               info->elm_of_node = pdata->elm_of_node;
-               info->flash_bbt = pdata->flash_bbt;
-       }
+       err = omap_get_dt_info(dev, info);
+       if (err)
+               return err;
 
-       platform_set_drvdata(pdev, info);
        info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
        if (!info->ops) {
                dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev)
                goto return_error;
        }
 
-       if (!omap2_nand_ecc_check(info, pdata)) {
+       if (!omap2_nand_ecc_check(info)) {
                err = -EINVAL;
                goto return_error;
        }
@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev)
                nand_chip->ecc.strength         = 4;
                nand_chip->ecc.hwctl            = omap_enable_hwecc_bch;
                nand_chip->ecc.correct          = nand_bch_correct_data;
-               nand_chip->ecc.calculate        = omap_calculate_ecc_bch;
+               nand_chip->ecc.calculate        = omap_calculate_ecc_bch_sw;
                mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
                /* Reserve one byte for the OMAP marker */
                oobbytes_per_step               = nand_chip->ecc.bytes + 1;
@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
                nand_chip->ecc.strength         = 4;
                nand_chip->ecc.hwctl            = omap_enable_hwecc_bch;
                nand_chip->ecc.correct          = omap_elm_correct_data;
-               nand_chip->ecc.calculate        = omap_calculate_ecc_bch;
                nand_chip->ecc.read_page        = omap_read_page_bch;
                nand_chip->ecc.write_page       = omap_write_page_bch;
+               nand_chip->ecc.write_subpage    = omap_write_subpage_bch;
                mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
                oobbytes_per_step               = nand_chip->ecc.bytes;
 
@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev)
                nand_chip->ecc.strength         = 8;
                nand_chip->ecc.hwctl            = omap_enable_hwecc_bch;
                nand_chip->ecc.correct          = nand_bch_correct_data;
-               nand_chip->ecc.calculate        = omap_calculate_ecc_bch;
+               nand_chip->ecc.calculate        = omap_calculate_ecc_bch_sw;
                mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
                /* Reserve one byte for the OMAP marker */
                oobbytes_per_step               = nand_chip->ecc.bytes + 1;
@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev)
                nand_chip->ecc.strength         = 8;
                nand_chip->ecc.hwctl            = omap_enable_hwecc_bch;
                nand_chip->ecc.correct          = omap_elm_correct_data;
-               nand_chip->ecc.calculate        = omap_calculate_ecc_bch;
                nand_chip->ecc.read_page        = omap_read_page_bch;
                nand_chip->ecc.write_page       = omap_write_page_bch;
+               nand_chip->ecc.write_subpage    = omap_write_subpage_bch;
                mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
                oobbytes_per_step               = nand_chip->ecc.bytes;
 
@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev)
                nand_chip->ecc.strength         = 16;
                nand_chip->ecc.hwctl            = omap_enable_hwecc_bch;
                nand_chip->ecc.correct          = omap_elm_correct_data;
-               nand_chip->ecc.calculate        = omap_calculate_ecc_bch;
                nand_chip->ecc.read_page        = omap_read_page_bch;
                nand_chip->ecc.write_page       = omap_write_page_bch;
+               nand_chip->ecc.write_subpage    = omap_write_subpage_bch;
                mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
                oobbytes_per_step               = nand_chip->ecc.bytes;
 
@@ -2167,10 +2255,9 @@ scan_tail:
        if (err)
                goto return_error;
 
-       if (dev->of_node)
-               mtd_device_register(mtd, NULL, 0);
-       else
-               mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+       err = mtd_device_register(mtd, NULL, 0);
+       if (err)
+               goto return_error;
 
        platform_set_drvdata(pdev, mtd);
 
index 85cff68643e0bd3b5ab1e504277edf4b67fbd4e9..90b9a9ccbe60e3fad44855705bf2dc4623cd5347 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #define        CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
 #define NAND_STOP_DELAY                msecs_to_jiffies(40)
  */
 #define INIT_BUFFER_SIZE       2048
 
+/* System control register and bit to enable NAND on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+
 /* registers and bit definitions */
 #define NDCR           (0x00) /* Control register */
 #define NDTR0CS0       (0x04) /* Timing Parameter 0 for CS0 */
@@ -174,6 +180,7 @@ enum {
 enum pxa3xx_nand_variant {
        PXA3XX_NAND_VARIANT_PXA,
        PXA3XX_NAND_VARIANT_ARMADA370,
+       PXA3XX_NAND_VARIANT_ARMADA_8K,
 };
 
 struct pxa3xx_nand_host {
@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = {
                .compatible = "marvell,armada370-nand",
                .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
        },
+       {
+               .compatible = "marvell,armada-8k-nand",
+               .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
                info->retcode = ERR_UNCORERR;
        if (status & NDSR_CORERR) {
                info->retcode = ERR_CORERR;
-               if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+               if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+                    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
                    info->ecc_bch)
                        info->ecc_err_cnt = NDSR_ERR_CNT(status);
                else
@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
                nand_writel(info, NDCB0, info->ndcb2);
 
                /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
-               if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+               if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+                   info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
                        nand_writel(info, NDCB0, info->ndcb3);
        }
 
@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
                chip->options |= NAND_BUSWIDTH_16;
 
        /* Device detection must be done with ECC disabled */
-       if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+       if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+           info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
                nand_writel(info, NDECCCTRL, 0x0);
 
        if (pdata->flash_bbt)
@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
         * (aka splitted) command handling,
         */
        if (mtd->writesize > PAGE_CHUNK_SIZE) {
-               if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+               if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+                   info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
                        chip->cmdfunc = nand_cmdfunc_extended;
                } else {
                        dev_err(&info->pdev->dev,
@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
        if (!of_id)
                return 0;
 
+       /*
+        * Some SoCs like A7k/A8k need to enable manually the NAND
+        * controller to avoid being bootloader dependent. This is done
+        * through the use of a single bit in the System Functions registers.
+        */
+       if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
+               struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
+                       pdev->dev.of_node, "marvell,system-controller");
+               u32 reg;
+
+               if (IS_ERR(sysctrl_base))
+                       return PTR_ERR(sysctrl_base);
+
+               regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
+               reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
+               regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+       }
+
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;
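
Editor's note: on the Armada 7K/8K variant the NAND controller is gated by a single bit in a system-control register, so probe looks up the syscon through the "marvell,system-controller" phandle and does a read-modify-write. The same update could also be written with regmap_update_bits(); the sketch below only illustrates that alternative on the GENCONF register defined above, and the helper name is hypothetical:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* Sketch only: set the NFC enable bit via regmap_update_bits() rather than
 * an open-coded regmap_read()/regmap_write() pair.
 */
static int armada_8k_nand_enable(struct device_node *np)
{
	struct regmap *sysctrl_base =
		syscon_regmap_lookup_by_phandle(np, "marvell,system-controller");

	if (IS_ERR(sysctrl_base))
		return PTR_ERR(sysctrl_base);

	return regmap_update_bits(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
				  GENCONF_SOC_DEVICE_MUX_NFC_EN,
				  GENCONF_SOC_DEVICE_MUX_NFC_EN);
}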
index 3baddfc997d139358ec63ff50f6d66c474e48ad9..2656c1ac5646e0e4bd43eeabb95bd507f926bf40 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/delay.h>
+#include <linux/dma/qcom_bam_dma.h>
 
 /* NANDc reg offsets */
 #define        NAND_FLASH_CMD                  0x00
@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,                     \
  */
 #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
 
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+       ((chip)->reg_read_dma + \
+       ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS       32
 #define QPIC_PER_CW_CMD_SGL            32
 #define QPIC_PER_CW_DATA_SGL           8
 
@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,                     \
 /*
  * This data type corresponds to the BAM transaction which will be used for all
  * NAND transfers.
+ * @bam_ce - the array of BAM command elements
  * @cmd_sgl - sgl for NAND BAM command pipe
  * @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce which marks the start position ce
+ *                for current sgl. It will be used for size calculation
+ *                for current sgl
  * @cmd_sgl_pos - current index in command sgl.
  * @cmd_sgl_start - start index in command sgl.
  * @tx_sgl_pos - current index in data sgl for tx.
@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,                     \
  * @rx_sgl_start - start index in data sgl for rx.
  */
 struct bam_transaction {
+       struct bam_cmd_element *bam_ce;
        struct scatterlist *cmd_sgl;
        struct scatterlist *data_sgl;
+       u32 bam_ce_pos;
+       u32 bam_ce_start;
        u32 cmd_sgl_pos;
        u32 cmd_sgl_start;
        u32 tx_sgl_pos;
@@ -307,7 +325,8 @@ struct nandc_regs {
  *                             controller
  * @dev:                       parent device
  * @base:                      MMIO base
- * @base_dma:                  physical base address of controller registers
+ * @base_phys:                 physical base address of controller registers
+ * @base_dma:                  dma base address of controller registers
  * @core_clk:                  controller clock
  * @aon_clk:                   another controller clock
  *
@@ -340,6 +359,7 @@ struct qcom_nand_controller {
        struct device *dev;
 
        void __iomem *base;
+       phys_addr_t base_phys;
        dma_addr_t base_dma;
 
        struct clk *core_clk;
@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
 
        bam_txn_size =
                sizeof(*bam_txn) + num_cw *
-               ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+               ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+               (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
                (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
 
        bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
        bam_txn = bam_txn_buf;
        bam_txn_buf += sizeof(*bam_txn);
 
+       bam_txn->bam_ce = bam_txn_buf;
+       bam_txn_buf +=
+               sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
        bam_txn->cmd_sgl = bam_txn_buf;
        bam_txn_buf +=
                sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
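
Editor's note: alloc_bam_transaction() sizes one allocation to hold the bam_transaction header plus, per codeword, the new command-element array and both scatterlist arrays, then carves it up with pointer arithmetic as shown above. A user-space sketch of the same single-buffer layout, with simplified stand-in types and a made-up codeword count:

#include <stdio.h>
#include <stdlib.h>

struct cmd_element { unsigned int w[4]; };		/* stand-in type */
struct sg_entry    { void *buf; unsigned int len; };	/* stand-in type */

struct txn {
	struct cmd_element *ce;
	struct sg_entry *cmd_sgl;
	struct sg_entry *data_sgl;
};

#define PER_CW_CE	32
#define PER_CW_CMD	32
#define PER_CW_DATA	8

int main(void)
{
	unsigned int num_cw = 4;	/* e.g. 2 KiB page, 512-byte codewords */
	size_t size = sizeof(struct txn) + num_cw *
		(sizeof(struct cmd_element) * PER_CW_CE +
		 sizeof(struct sg_entry) * PER_CW_CMD +
		 sizeof(struct sg_entry) * PER_CW_DATA);
	char *buf = calloc(1, size);
	struct txn *t = (struct txn *)buf;

	if (!buf)
		return 1;

	/* carve the one buffer into the three arrays, as the driver does */
	buf += sizeof(*t);
	t->ce = (struct cmd_element *)buf;
	buf += sizeof(struct cmd_element) * PER_CW_CE * num_cw;
	t->cmd_sgl = (struct sg_entry *)buf;
	buf += sizeof(struct sg_entry) * PER_CW_CMD * num_cw;
	t->data_sgl = (struct sg_entry *)buf;

	printf("one %zu-byte buffer carved into three arrays\n", size);
	free(t);
	return 0;
}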
@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
        if (!nandc->props->is_bam)
                return;
 
+       bam_txn->bam_ce_pos = 0;
+       bam_txn->bam_ce_start = 0;
        bam_txn->cmd_sgl_pos = 0;
        bam_txn->cmd_sgl_start = 0;
        bam_txn->tx_sgl_pos = 0;
@@ -733,6 +760,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
        return 0;
 }
 
+/*
+ * Prepares the command descriptor for BAM DMA which will be used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be formed in command element type so this function uses the command
+ * element from bam transaction ce array and fills the same with required
+ * data. A single SGL can contain multiple command elements so
+ * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+ * after the current command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+                                int reg_off, const void *vaddr,
+                                int size, unsigned int flags)
+{
+       int bam_ce_size;
+       int i, ret;
+       struct bam_cmd_element *bam_ce_buffer;
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+
+       bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+       /* fill the command desc */
+       for (i = 0; i < size; i++) {
+               if (read)
+                       bam_prep_ce(&bam_ce_buffer[i],
+                                   nandc_reg_phys(nandc, reg_off + 4 * i),
+                                   BAM_READ_COMMAND,
+                                   reg_buf_dma_addr(nandc,
+                                                    (__le32 *)vaddr + i));
+               else
+                       bam_prep_ce_le32(&bam_ce_buffer[i],
+                                        nandc_reg_phys(nandc, reg_off + 4 * i),
+                                        BAM_WRITE_COMMAND,
+                                        *((__le32 *)vaddr + i));
+       }
+
+       bam_txn->bam_ce_pos += size;
+
+       /* use the separate sgl after this command */
+       if (flags & NAND_BAM_NEXT_SGL) {
+               bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+               bam_ce_size = (bam_txn->bam_ce_pos -
+                               bam_txn->bam_ce_start) *
+                               sizeof(struct bam_cmd_element);
+               sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+                          bam_ce_buffer, bam_ce_size);
+               bam_txn->cmd_sgl_pos++;
+               bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+               if (flags & NAND_BAM_NWD) {
+                       ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+                                                    DMA_PREP_FENCE |
+                                                    DMA_PREP_CMD);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
 /*
  * Prepares the data descriptor for BAM DMA which will be used for NAND
  * data reads and writes.
@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
 {
        bool flow_control = false;
        void *vaddr;
-       int size;
 
-       if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-               flow_control = true;
+       vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+       nandc->reg_read_pos += num_regs;
 
        if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, first);
 
-       size = num_regs * sizeof(u32);
-       vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
-       nandc->reg_read_pos += num_regs;
+       if (nandc->props->is_bam)
+               return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+                                            num_regs, flags);
+
+       if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+               flow_control = true;
 
-       return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
+       return prep_adm_dma_desc(nandc, true, first, vaddr,
+                                num_regs * sizeof(u32), flow_control);
 }
 
 /*
@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
        bool flow_control = false;
        struct nandc_regs *regs = nandc->regs;
        void *vaddr;
-       int size;
 
        vaddr = offset_to_nandc_reg(regs, first);
 
-       if (first == NAND_FLASH_CMD)
-               flow_control = true;
-
        if (first == NAND_ERASED_CW_DETECT_CFG) {
                if (flags & NAND_ERASED_CW_SET)
                        vaddr = &regs->erased_cw_detect_cfg_set;
@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
        if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
 
-       size = num_regs * sizeof(u32);
+       if (nandc->props->is_bam)
+               return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+                                            num_regs, flags);
+
+       if (first == NAND_FLASH_CMD)
+               flow_control = true;
 
-       return prep_adm_dma_desc(nandc, false, first, vaddr, size,
-                                flow_control);
+       return prep_adm_dma_desc(nandc, false, first, vaddr,
+                                num_regs * sizeof(u32), flow_control);
 }
 
 /*
@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
                }
 
                if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
-                       r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
+                       r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+                                                  DMA_PREP_CMD);
                        if (r)
                                return r;
                }
@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
        if (IS_ERR(nandc->base))
                return PTR_ERR(nandc->base);
 
+       nandc->base_phys = res->start;
        nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
 
        nandc->core_clk = devm_clk_get(dev, "core");
index e7f3c98487e620bc902a56057a537d6f0a4d1819..3c5008a4f5f33accc4fa09d6cc95a2339daf868a 100644 (file)
@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match);
 
 static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
 {
-       const struct of_device_id *match;
-       struct flctl_soc_config *config;
+       const struct flctl_soc_config *config;
        struct sh_flctl_platform_data *pdata;
 
-       match = of_match_device(of_flctl_match, dev);
-       if (match)
-               config = (struct flctl_soc_config *)match->data;
-       else {
+       config = of_device_get_match_data(dev);
+       if (!config) {
                dev_err(dev, "%s: no OF configuration attached\n", __func__);
                return NULL;
        }
index d206b3c533bcba60989c1c82eaa69c291d73e877..ee5ab994132fdc5d15e9c4dbcc43e0669a470fbd 100644 (file)
@@ -6,3 +6,11 @@ config MTD_PARSER_TRX
          may contain up to 3/4 partitions (depending on the version).
          This driver will parse TRX header and report at least two partitions:
          kernel and rootfs.
+
+config MTD_SHARPSL_PARTS
+       tristate "Sharp SL Series NAND flash partition parser"
+       depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST
+       help
+         This provides the read-only FTL logic necessary to read the partition
+         table from the NAND flash of Sharp SL Series (Zaurus) and the MTD
+         partition parser using this code.
index 4d9024e0be3bf8505bd0a52f968a7ec6ab2f16e1..5b1bcc3d90d96b387723ba804e274f27dea13165 100644 (file)
@@ -1 +1,2 @@
 obj-$(CONFIG_MTD_PARSER_TRX)           += parser_trx.o
+obj-$(CONFIG_MTD_SHARPSL_PARTS)                += sharpslpart.o
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
new file mode 100644 (file)
index 0000000..5fe0079
--- /dev/null
@@ -0,0 +1,398 @@
+/*
+ * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL
+ * for logical addressing, as used on the PXA models of the SHARP SL Series.
+ *
+ * Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com>
+ *
+ * Based on SHARP GPL 2.4 sources:
+ *   http://support.ezaurus.com/developer/source/source_dl.asp
+ *     drivers/mtd/nand/sharp_sl_logical.c
+ *     linux/include/asm-arm/sharp_nand_logical.h
+ *
+ * Copyright (C) 2002 SHARP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/* oob structure */
+#define NAND_NOOB_LOGADDR_00           8
+#define NAND_NOOB_LOGADDR_01           9
+#define NAND_NOOB_LOGADDR_10           10
+#define NAND_NOOB_LOGADDR_11           11
+#define NAND_NOOB_LOGADDR_20           12
+#define NAND_NOOB_LOGADDR_21           13
+
+#define BLOCK_IS_RESERVED              0xffff
+#define BLOCK_UNMASK_COMPLEMENT                1
+
+/* factory defaults */
+#define SHARPSL_NAND_PARTS             3
+#define SHARPSL_FTL_PART_SIZE          (7 * SZ_1M)
+#define SHARPSL_PARTINFO1_LADDR                0x00060000
+#define SHARPSL_PARTINFO2_LADDR                0x00064000
+
+#define BOOT_MAGIC                     0x424f4f54
+#define FSRO_MAGIC                     0x4653524f
+#define FSRW_MAGIC                     0x46535257
+
+/**
+ * struct sharpsl_ftl - Sharp FTL Logical Table
+ * @logmax:            number of logical blocks
+ * @log2phy:           the logical-to-physical table
+ *
+ * Structure containing the logical-to-physical translation table
+ * used by the SHARP SL FTL.
+ */
+struct sharpsl_ftl {
+       unsigned int logmax;
+       unsigned int *log2phy;
+};
+
+/* verify that the OOB bytes 8 to 15 are free and available for the FTL */
+static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
+{
+       u8 freebytes = 0;
+       int section = 0;
+
+       while (true) {
+               struct mtd_oob_region oobfree = { };
+               int ret, i;
+
+               ret = mtd_ooblayout_free(mtd, section++, &oobfree);
+               if (ret)
+                       break;
+
+               if (!oobfree.length || oobfree.offset > 15 ||
+                   (oobfree.offset + oobfree.length) < 8)
+                       continue;
+
+               i = oobfree.offset >= 8 ? oobfree.offset : 8;
+               for (; i < oobfree.offset + oobfree.length && i < 16; i++)
+                       freebytes |= BIT(i - 8);
+
+               if (freebytes == 0xff)
+                       return 0;
+       }
+
+       return -ENOTSUPP;
+}
+
+static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
+{
+       struct mtd_oob_ops ops = { };
+       int ret;
+
+       ops.mode = MTD_OPS_PLACE_OOB;
+       ops.ooblen = mtd->oobsize;
+       ops.oobbuf = buf;
+
+       ret = mtd_read_oob(mtd, offs, &ops);
+       if (ret != 0 || mtd->oobsize != ops.oobretlen)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * The logical block number assigned to a physical block is stored in the OOB
+ * of the first page, in 3 16-bit copies with the following layout:
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB   xyxyxy
+ *
+ * When reading we check that the first two copies agree.
+ * In case of error, matching is tried using the following pairs.
+ * Reserved values 0xffff mean the block is kept for wear leveling.
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB   xyxy    oob[8]==oob[10] && oob[9]==oob[11]   -> byte0=8   byte1=9
+ * ECC BB     xyxy  oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10  byte1=11
+ * ECC BB   xy  xy  oob[12]==oob[8] && oob[13]==oob[9]   -> byte0=12  byte1=13
+ */
+static int sharpsl_nand_get_logical_num(u8 *oob)
+{
+       u16 us;
+       int good0, good1;
+
+       if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
+           oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
+               good0 = NAND_NOOB_LOGADDR_00;
+               good1 = NAND_NOOB_LOGADDR_01;
+       } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
+                  oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
+               good0 = NAND_NOOB_LOGADDR_10;
+               good1 = NAND_NOOB_LOGADDR_11;
+       } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
+                  oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
+               good0 = NAND_NOOB_LOGADDR_20;
+               good1 = NAND_NOOB_LOGADDR_21;
+       } else {
+               return -EINVAL;
+       }
+
+       us = oob[good0] | oob[good1] << 8;
+
+       /* parity check */
+       if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
+               return -EINVAL;
+
+       /* reserved */
+       if (us == BLOCK_IS_RESERVED)
+               return BLOCK_IS_RESERVED;
+
+       return (us >> 1) & GENMASK(9, 0);
+}
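
Editor's note: the decoder above picks the first of the three 16-bit copies whose pairwise comparison succeeds, rejects values with odd parity, treats 0xffff as a wear-levelling reserve block, and takes the logical block number from bits 1-10. A self-contained example that replays the same decode on a made-up OOB buffer:

#include <stdio.h>
#include <stdint.h>

/* Count of set bits, standing in for the kernel's hweight16(). */
static int popcount16(uint16_t v)
{
	int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	uint8_t oob[16] = { 0 };
	uint16_t us;

	/* logical block 5 stored as (5 << 1) = 0x000a, even parity, 3 copies */
	oob[8] = oob[10] = oob[12] = 0x0a;
	oob[9] = oob[11] = oob[13] = 0x00;

	if (oob[8] != oob[10] || oob[9] != oob[11]) {
		printf("first two copies disagree\n");
		return 1;
	}

	us = oob[8] | oob[9] << 8;
	if (popcount16(us) & 1) {
		printf("parity check failed\n");
		return 1;
	}
	if (us == 0xffff) {
		printf("reserved block\n");
		return 0;
	}
	printf("logical block %u\n", (us >> 1) & 0x3ff);
	return 0;
}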
+
+static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
+{
+       unsigned int block_num, log_num, phymax;
+       loff_t block_adr;
+       u8 *oob;
+       int i, ret;
+
+       oob = kzalloc(mtd->oobsize, GFP_KERNEL);
+       if (!oob)
+               return -ENOMEM;
+
+       phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);
+
+       /* FTL reserves 5% of the blocks + 1 spare  */
+       ftl->logmax = ((phymax * 95) / 100) - 1;
+
+       ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
+                                    GFP_KERNEL);
+       if (!ftl->log2phy) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       /* initialize ftl->log2phy */
+       for (i = 0; i < ftl->logmax; i++)
+               ftl->log2phy[i] = UINT_MAX;
+
+       /* create physical-logical table */
+       for (block_num = 0; block_num < phymax; block_num++) {
+               block_adr = block_num * mtd->erasesize;
+
+               if (mtd_block_isbad(mtd, block_adr))
+                       continue;
+
+               if (sharpsl_nand_read_oob(mtd, block_adr, oob))
+                       continue;
+
+               /* get logical block */
+               log_num = sharpsl_nand_get_logical_num(oob);
+
+               /* cut-off errors and skip the out-of-range values */
+               if (log_num > 0 && log_num < ftl->logmax) {
+                       if (ftl->log2phy[log_num] == UINT_MAX)
+                               ftl->log2phy[log_num] = block_num;
+               }
+       }
+
+       pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
+               phymax, ftl->logmax, phymax - ftl->logmax);
+
+       ret = 0;
+exit:
+       kfree(oob);
+       return ret;
+}
+
+void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+{
+       kfree(ftl->log2phy);
+}
+
+static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
+                                  loff_t from,
+                                  size_t len,
+                                  void *buf,
+                                  struct sharpsl_ftl *ftl)
+{
+       unsigned int log_num, final_log_num;
+       unsigned int block_num;
+       loff_t block_adr;
+       loff_t block_ofs;
+       size_t retlen;
+       int err;
+
+       log_num = mtd_div_by_eb((u32)from, mtd);
+       final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);
+
+       if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
+               return -EINVAL;
+
+       block_num = ftl->log2phy[log_num];
+       block_adr = block_num * mtd->erasesize;
+       block_ofs = mtd_mod_by_eb((u32)from, mtd);
+
+       err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
+       /* Ignore corrected ECC errors */
+       if (mtd_is_bitflip(err))
+               err = 0;
+
+       if (!err && retlen != len)
+               err = -EIO;
+
+       if (err)
+               pr_err("sharpslpart: error, read failed at %#llx\n",
+                      block_adr + block_ofs);
+
+       return err;
+}
+
+/*
+ * MTD Partition Parser
+ *
+ * Sample values read from SL-C860
+ *
+ * # cat /proc/mtd
+ * dev:    size   erasesize  name
+ * mtd0: 006d0000 00020000 "Filesystem"
+ * mtd1: 00700000 00004000 "smf"
+ * mtd2: 03500000 00004000 "root"
+ * mtd3: 04400000 00004000 "home"
+ *
+ * PARTITIONINFO1
+ * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00  ......p.BOOT....
+ * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00  ..p.....FSRO....
+ * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00  ........FSRW....
+ */
+struct sharpsl_nand_partinfo {
+       __le32 start;
+       __le32 end;
+       __be32 magic;
+       u32 reserved;
+};
+
+static int sharpsl_nand_read_partinfo(struct mtd_info *master,
+                                     loff_t from,
+                                     size_t len,
+                                     struct sharpsl_nand_partinfo *buf,
+                                     struct sharpsl_ftl *ftl)
+{
+       int ret;
+
+       ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
+       if (ret)
+               return ret;
+
+       /* check for magics */
+       if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
+           be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
+           be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
+               pr_err("sharpslpart: magic values mismatch\n");
+               return -EINVAL;
+       }
+
+       /* fixup for hardcoded value 64 MiB (for older models) */
+       buf[2].end = cpu_to_le32(master->size);
+
+       /* extra sanity check */
+       if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
+           le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
+           le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
+           le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
+           le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
+               pr_err("sharpslpart: partition sizes mismatch\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
+                                       const struct mtd_partition **pparts,
+                                       struct mtd_part_parser_data *data)
+{
+       struct sharpsl_ftl ftl;
+       struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
+       struct mtd_partition *sharpsl_nand_parts;
+       int err;
+
+       /* check that OOB bytes 8 to 15 used by the FTL are actually free */
+       err = sharpsl_nand_check_ooblayout(master);
+       if (err)
+               return err;
+
+       /* init logical mgmt (FTL) */
+       err = sharpsl_nand_init_ftl(master, &ftl);
+       if (err)
+               return err;
+
+       /* read and validate first partition table */
+       pr_info("sharpslpart: try reading first partition table\n");
+       err = sharpsl_nand_read_partinfo(master,
+                                        SHARPSL_PARTINFO1_LADDR,
+                                        sizeof(buf), buf, &ftl);
+       if (err) {
+               /* fallback: read second partition table */
+               pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
+               err = sharpsl_nand_read_partinfo(master,
+                                                SHARPSL_PARTINFO2_LADDR,
+                                                sizeof(buf), buf, &ftl);
+       }
+
+       /* cleanup logical mgmt (FTL) */
+       sharpsl_nand_cleanup_ftl(&ftl);
+
+       if (err) {
+               pr_err("sharpslpart: both partition tables are invalid\n");
+               return err;
+       }
+
+       sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) *
+                                    SHARPSL_NAND_PARTS, GFP_KERNEL);
+       if (!sharpsl_nand_parts)
+               return -ENOMEM;
+
+       /* original names */
+       sharpsl_nand_parts[0].name = "smf";
+       sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
+       sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
+                                    le32_to_cpu(buf[0].start);
+
+       sharpsl_nand_parts[1].name = "root";
+       sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
+       sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
+                                    le32_to_cpu(buf[1].start);
+
+       sharpsl_nand_parts[2].name = "home";
+       sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
+       sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
+                                    le32_to_cpu(buf[2].start);
+
+       *pparts = sharpsl_nand_parts;
+       return SHARPSL_NAND_PARTS;
+}
+
+static struct mtd_part_parser sharpsl_mtd_parser = {
+       .parse_fn = sharpsl_parse_mtd_partitions,
+       .name = "sharpslpart",
+};
+module_mtd_part_parser(sharpsl_mtd_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>");
+MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");
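
For context, a parser registered with module_mtd_part_parser() is looked up by name when a NAND driver registers its master device; a minimal sketch of such a call site (hypothetical, not part of this patch) could look like:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical probe-type list; "sharpslpart" matches the parser name above. */
static const char * const sharpsl_probes[] = { "sharpslpart", NULL };

/* In the NAND controller's probe path, once the chip has been scanned
 * ("mtd" and "err" belong to that surrounding driver):
 */
err = mtd_device_parse_register(mtd, sharpsl_probes, NULL, NULL, 0);
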
index 3692dd5478799f044bbf56895e467b144074b00a..4237c7cebf0210dfe2d2b0684b5fc9b8fbea56d6 100644 (file)
@@ -989,9 +989,9 @@ restart:
 
 
 /* flush timer, runs a second after last write */
-static void sm_cache_flush_timer(unsigned long data)
+static void sm_cache_flush_timer(struct timer_list *t)
 {
-       struct sm_ftl *ftl = (struct sm_ftl *)data;
+       struct sm_ftl *ftl = from_timer(ftl, t, timer);
        queue_work(cache_flush_workqueue, &ftl->flush_work);
 }
 
@@ -1139,7 +1139,7 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
 
 
        mutex_init(&ftl->mutex);
-       setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+       timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
        INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
        init_completion(&ftl->erase_completion);
 
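The sm_ftl hunks above follow the timer API conversion used throughout this merge; a self-contained sketch of the pattern, with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ctx {				/* hypothetical private structure */
	struct timer_list timer;	/* timer embedded in the structure */
	/* ... other driver state ... */
};

static void my_timeout(struct timer_list *t)
{
	/* recover the containing structure instead of casting an unsigned long */
	struct my_ctx *ctx = from_timer(ctx, t, timer);

	/* ... handle the expiry using ctx ... */
}

static void my_ctx_start(struct my_ctx *ctx)
{
	timer_setup(&ctx->timer, my_timeout, 0);
	mod_timer(&ctx->timer, jiffies + HZ);	/* fire in about one second */
}
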
index 69c638dd04848be10540cdd26e1c0327833d3668..89da88e591215db1a9689c73c019468b24a2fb82 100644 (file)
@@ -50,7 +50,7 @@ config SPI_ATMEL_QUADSPI
 
 config SPI_CADENCE_QUADSPI
        tristate "Cadence Quad SPI controller"
-       depends on OF && (ARM || COMPILE_TEST)
+       depends on OF && (ARM || ARM64 || COMPILE_TEST)
        help
          Enable support for the Cadence Quad SPI Flash controller.
 
@@ -90,7 +90,7 @@ config SPI_INTEL_SPI
        tristate
 
 config SPI_INTEL_SPI_PCI
-       tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT
+       tristate "Intel PCH/PCU SPI flash PCI driver"
        depends on X86 && PCI
        select SPI_INTEL_SPI
        help
@@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI
          will be called intel-spi-pci.
 
 config SPI_INTEL_SPI_PLATFORM
-       tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT
+       tristate "Intel PCH/PCU SPI flash platform driver"
        depends on X86
        select SPI_INTEL_SPI
        help
index 53c7d8e0327aa4376bdbb3cb00d78babf70349a4..75a2bc447a99d1561fd5ab5adb37a2807d2308b3 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/of_device.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/spi/spi.h>
 #include <linux/timer.h>
@@ -38,6 +39,9 @@
 #define CQSPI_NAME                     "cadence-qspi"
 #define CQSPI_MAX_CHIPSELECT           16
 
+/* Quirks */
+#define CQSPI_NEEDS_WR_DELAY           BIT(0)
+
 struct cqspi_st;
 
 struct cqspi_flash_pdata {
@@ -75,7 +79,9 @@ struct cqspi_st {
        bool                    is_decoded_cs;
        u32                     fifo_depth;
        u32                     fifo_width;
+       bool                    rclk_en;
        u32                     trigger_address;
+       u32                     wr_delay;
        struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
 };
 
@@ -608,6 +614,15 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
        reinit_completion(&cqspi->transfer_complete);
        writel(CQSPI_REG_INDIRECTWR_START_MASK,
               reg_base + CQSPI_REG_INDIRECTWR);
+       /*
+        * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+        * Controller programming sequence, a couple of QSPI_REF_CLK
+        * cycles of delay are required for the above bit to be
+        * internally synchronized by the QSPI module. Provide 5
+        * cycles of delay.
+        */
+       if (cqspi->wr_delay)
+               ndelay(cqspi->wr_delay);
 
        while (remaining > 0) {
                write_bytes = remaining > page_size ? page_size : remaining;
@@ -775,7 +790,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
 }
 
 static void cqspi_readdata_capture(struct cqspi_st *cqspi,
-                                  const unsigned int bypass,
+                                  const bool bypass,
                                   const unsigned int delay)
 {
        void __iomem *reg_base = cqspi->iobase;
@@ -839,7 +854,8 @@ static void cqspi_configure(struct spi_nor *nor)
                cqspi->sclk = sclk;
                cqspi_config_baudrate_div(cqspi);
                cqspi_delay(nor);
-               cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
+               cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
+                                      f_pdata->read_delay);
        }
 
        if (switch_cs || switch_ck)
@@ -1036,6 +1052,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
                return -ENXIO;
        }
 
+       cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
+
        return 0;
 }
 
@@ -1156,6 +1174,7 @@ static int cqspi_probe(struct platform_device *pdev)
        struct cqspi_st *cqspi;
        struct resource *res;
        struct resource *res_ahb;
+       unsigned long data;
        int ret;
        int irq;
 
@@ -1206,13 +1225,24 @@ static int cqspi_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               pm_runtime_put_noidle(dev);
+               return ret;
+       }
+
        ret = clk_prepare_enable(cqspi->clk);
        if (ret) {
                dev_err(dev, "Cannot enable QSPI clock.\n");
-               return ret;
+               goto probe_clk_failed;
        }
 
        cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+       data  = (unsigned long)of_device_get_match_data(dev);
+       if (data & CQSPI_NEEDS_WR_DELAY)
+               cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+                                                  cqspi->master_ref_clk_hz);
 
        ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
                               pdev->name, cqspi);
@@ -1233,10 +1263,13 @@ static int cqspi_probe(struct platform_device *pdev)
        }
 
        return ret;
-probe_irq_failed:
-       cqspi_controller_enable(cqspi, 0);
 probe_setup_failed:
+       cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
        clk_disable_unprepare(cqspi->clk);
+probe_clk_failed:
+       pm_runtime_put_sync(dev);
+       pm_runtime_disable(dev);
        return ret;
 }
 
@@ -1253,6 +1286,9 @@ static int cqspi_remove(struct platform_device *pdev)
 
        clk_disable_unprepare(cqspi->clk);
 
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
        return 0;
 }
 
@@ -1284,7 +1320,14 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
 #endif
 
 static const struct of_device_id cqspi_dt_ids[] = {
-       {.compatible = "cdns,qspi-nor",},
+       {
+               .compatible = "cdns,qspi-nor",
+               .data = (void *)0,
+       },
+       {
+               .compatible = "ti,k2g-qspi",
+               .data = (void *)CQSPI_NEEDS_WR_DELAY,
+       },
        { /* end of table */ }
 };
 
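To make the write-delay quirk above concrete, assume a hypothetical 192 MHz QSPI reference clock (illustrative value only, not taken from the patch):

/* of_device_get_match_data() returns the .data word from the table above:
 * 0 for "cdns,qspi-nor", CQSPI_NEEDS_WR_DELAY for "ti,k2g-qspi".
 *
 * With a 192 MHz reference clock:
 *   DIV_ROUND_UP(1000000000, 192000000) = 6 ns per QSPI_REF_CLK cycle,
 *   wr_delay = 5 * 6 = 30 ns of ndelay() after INDIRECTWR START is set.
 */
data = (unsigned long)of_device_get_match_data(dev);
if (data & CQSPI_NEEDS_WR_DELAY)
	cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
					   cqspi->master_ref_clk_hz);
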
index e82652335ede806b4db25bc6e463bbc77e5deb10..c0976f2e3dd19925b06f155bd093176818465720 100644 (file)
@@ -63,7 +63,10 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
 }
 
 static const struct pci_device_id intel_spi_pci_ids[] = {
+       { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
        { },
 };
 MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
index 8a596bfeddff6ce87db49f8fd2047d6bd355efd9..ef034d898a2363ea6e90672eca7264e8c8e4fa88 100644 (file)
@@ -67,8 +67,6 @@
 #define PR_LIMIT_MASK                  (0x3fff << PR_LIMIT_SHIFT)
 #define PR_RPE                         BIT(15)
 #define PR_BASE_MASK                   0x3fff
-/* Last PR is GPR0 */
-#define PR_NUM                         (5 + 1)
 
 /* Offsets are from @ispi->sregs */
 #define SSFSTS_CTL                     0x00
 #define OPMENU0                                0x08
 #define OPMENU1                                0x0c
 
+#define OPTYPE_READ_NO_ADDR            0
+#define OPTYPE_WRITE_NO_ADDR           1
+#define OPTYPE_READ_WITH_ADDR          2
+#define OPTYPE_WRITE_WITH_ADDR         3
+
 /* CPU specifics */
 #define BYT_PR                         0x74
 #define BYT_SSFSTS_CTL                 0x90
 #define BYT_BCR                                0xfc
 #define BYT_BCR_WPD                    BIT(0)
 #define BYT_FREG_NUM                   5
+#define BYT_PR_NUM                     5
 
 #define LPT_PR                         0x74
 #define LPT_SSFSTS_CTL                 0x90
 #define LPT_FREG_NUM                   5
+#define LPT_PR_NUM                     5
 
 #define BXT_PR                         0x84
 #define BXT_SSFSTS_CTL                 0xa0
 #define BXT_FREG_NUM                   12
+#define BXT_PR_NUM                     6
+
+#define LVSCC                          0xc4
+#define UVSCC                          0xc8
+#define ERASE_OPCODE_SHIFT             8
+#define ERASE_OPCODE_MASK              (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_SHIFT         16
+#define ERASE_64K_OPCODE_MASK          (0xff << ERASE_64K_OPCODE_SHIFT)
 
 #define INTEL_SPI_TIMEOUT              5000 /* ms */
 #define INTEL_SPI_FIFO_SZ              64
  * @pregs: Start of protection registers
  * @sregs: Start of software sequencer registers
  * @nregions: Maximum number of regions
+ * @pr_num: Maximum number of protected range registers
  * @writeable: Is the chip writeable
- * @swseq: Use SW sequencer in register reads/writes
+ * @locked: Is SPI setting locked
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
  * @erase_64k: 64k erase supported
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
  *           before it locks down the controller.
@@ -132,8 +148,11 @@ struct intel_spi {
        void __iomem *pregs;
        void __iomem *sregs;
        size_t nregions;
+       size_t pr_num;
        bool writeable;
-       bool swseq;
+       bool locked;
+       bool swseq_reg;
+       bool swseq_erase;
        bool erase_64k;
        u8 opcodes[8];
        u8 preopcodes[2];
@@ -167,7 +186,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
        for (i = 0; i < ispi->nregions; i++)
                dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
                        readl(ispi->base + FREG(i)));
-       for (i = 0; i < PR_NUM; i++)
+       for (i = 0; i < ispi->pr_num; i++)
                dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
                        readl(ispi->pregs + PR(i)));
 
@@ -181,8 +200,11 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
        if (ispi->info->type == INTEL_SPI_BYT)
                dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
 
+       dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
+       dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
+
        dev_dbg(ispi->dev, "Protected regions:\n");
-       for (i = 0; i < PR_NUM; i++) {
+       for (i = 0; i < ispi->pr_num; i++) {
                u32 base, limit;
 
                value = readl(ispi->pregs + PR(i));
@@ -214,7 +236,9 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
        }
 
        dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
-               ispi->swseq ? 'S' : 'H');
+               ispi->swseq_reg ? 'S' : 'H');
+       dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
+               ispi->swseq_erase ? 'S' : 'H');
 }
 
 /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
@@ -278,7 +302,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
 
 static int intel_spi_init(struct intel_spi *ispi)
 {
-       u32 opmenu0, opmenu1, val;
+       u32 opmenu0, opmenu1, lvscc, uvscc, val;
        int i;
 
        switch (ispi->info->type) {
@@ -286,6 +310,8 @@ static int intel_spi_init(struct intel_spi *ispi)
                ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
                ispi->pregs = ispi->base + BYT_PR;
                ispi->nregions = BYT_FREG_NUM;
+               ispi->pr_num = BYT_PR_NUM;
+               ispi->swseq_reg = true;
 
                if (writeable) {
                        /* Disable write protection */
@@ -305,12 +331,15 @@ static int intel_spi_init(struct intel_spi *ispi)
                ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
                ispi->pregs = ispi->base + LPT_PR;
                ispi->nregions = LPT_FREG_NUM;
+               ispi->pr_num = LPT_PR_NUM;
+               ispi->swseq_reg = true;
                break;
 
        case INTEL_SPI_BXT:
                ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
                ispi->pregs = ispi->base + BXT_PR;
                ispi->nregions = BXT_FREG_NUM;
+               ispi->pr_num = BXT_PR_NUM;
                ispi->erase_64k = true;
                break;
 
@@ -318,42 +347,64 @@ static int intel_spi_init(struct intel_spi *ispi)
                return -EINVAL;
        }
 
-       /* Disable #SMI generation */
+       /* Disable #SMI generation from HW sequencer */
        val = readl(ispi->base + HSFSTS_CTL);
        val &= ~HSFSTS_CTL_FSMIE;
        writel(val, ispi->base + HSFSTS_CTL);
 
        /*
-        * BIOS programs allowed opcodes and then locks down the register.
-        * So read back what opcodes it decided to support. That's the set
-        * we are going to support as well.
+        * Determine whether erase operation should use HW or SW sequencer.
+        *
+        * The HW sequencer has a predefined list of opcodes, with only the
+        * erase opcode being programmable in LVSCC and UVSCC registers.
+        * If these registers don't contain a valid erase opcode, erase
+        * cannot be done using HW sequencer.
         */
-       opmenu0 = readl(ispi->sregs + OPMENU0);
-       opmenu1 = readl(ispi->sregs + OPMENU1);
+       lvscc = readl(ispi->base + LVSCC);
+       uvscc = readl(ispi->base + UVSCC);
+       if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+               ispi->swseq_erase = true;
+       /* SPI controller on Intel BXT supports 64K erase opcode */
+       if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+               if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+                   !(uvscc & ERASE_64K_OPCODE_MASK))
+                       ispi->erase_64k = false;
 
        /*
         * Some controllers can only do basic operations using hardware
         * sequencer. All other operations are supposed to be carried out
-        * using software sequencer. If we find that BIOS has programmed
-        * opcodes for the software sequencer we use that over the hardware
-        * sequencer.
+        * using software sequencer.
         */
-       if (opmenu0 && opmenu1) {
-               for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
-                       ispi->opcodes[i] = opmenu0 >> i * 8;
-                       ispi->opcodes[i + 4] = opmenu1 >> i * 8;
-               }
-
-               val = readl(ispi->sregs + PREOP_OPTYPE);
-               ispi->preopcodes[0] = val;
-               ispi->preopcodes[1] = val >> 8;
-
+       if (ispi->swseq_reg) {
                /* Disable #SMI generation from SW sequencer */
                val = readl(ispi->sregs + SSFSTS_CTL);
                val &= ~SSFSTS_CTL_FSMIE;
                writel(val, ispi->sregs + SSFSTS_CTL);
+       }
+
+       /* Check controller's lock status */
+       val = readl(ispi->base + HSFSTS_CTL);
+       ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+       if (ispi->locked) {
+               /*
+                * BIOS programs allowed opcodes and then locks down the
+                * register. So read back what opcodes it decided to support.
+                * That's the set we are going to support as well.
+                */
+               opmenu0 = readl(ispi->sregs + OPMENU0);
+               opmenu1 = readl(ispi->sregs + OPMENU1);
 
-               ispi->swseq = true;
+               if (opmenu0 && opmenu1) {
+                       for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+                               ispi->opcodes[i] = opmenu0 >> i * 8;
+                               ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+                       }
+
+                       val = readl(ispi->sregs + PREOP_OPTYPE);
+                       ispi->preopcodes[0] = val;
+                       ispi->preopcodes[1] = val >> 8;
+               }
        }
 
        intel_spi_dump_regs(ispi);
@@ -361,18 +412,28 @@ static int intel_spi_init(struct intel_spi *ispi)
        return 0;
 }
 
-static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
 {
        int i;
+       int preop;
 
-       for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
-               if (ispi->opcodes[i] == opcode)
-                       return i;
-       return -EINVAL;
+       if (ispi->locked) {
+               for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+                       if (ispi->opcodes[i] == opcode)
+                               return i;
+
+               return -EINVAL;
+       }
+
+       /* The lock is off, so just use index 0 */
+       writel(opcode, ispi->sregs + OPMENU0);
+       preop = readw(ispi->sregs + PREOP_OPTYPE);
+       writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
+
+       return 0;
 }
 
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
-                             int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
 {
        u32 val, status;
        int ret;
@@ -394,6 +455,9 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
                return -EINVAL;
        }
 
+       if (len > INTEL_SPI_FIFO_SZ)
+               return -EINVAL;
+
        val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
        val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
        val |= HSFSTS_CTL_FGO;
@@ -412,27 +476,39 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
        return 0;
 }
 
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
-                             int len)
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+                             int optype)
 {
-       u32 val, status;
+       u32 val = 0, status;
+       u16 preop;
        int ret;
 
-       ret = intel_spi_opcode_index(ispi, opcode);
+       ret = intel_spi_opcode_index(ispi, opcode, optype);
        if (ret < 0)
                return ret;
 
-       val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+       if (len > INTEL_SPI_FIFO_SZ)
+               return -EINVAL;
+
+       /* Only mark 'Data Cycle' bit when there is data to be transferred */
+       if (len > 0)
+               val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
        val |= ret << SSFSTS_CTL_COP_SHIFT;
        val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
        val |= SSFSTS_CTL_SCGO;
+       preop = readw(ispi->sregs + PREOP_OPTYPE);
+       if (preop) {
+               val |= SSFSTS_CTL_ACS;
+               if (preop >> 8)
+                       val |= SSFSTS_CTL_SPOP;
+       }
        writel(val, ispi->sregs + SSFSTS_CTL);
 
        ret = intel_spi_wait_sw_busy(ispi);
        if (ret)
                return ret;
 
-       status = readl(ispi->base + SSFSTS_CTL);
+       status = readl(ispi->sregs + SSFSTS_CTL);
        if (status & SSFSTS_CTL_FCERR)
                return -EIO;
        else if (status & SSFSTS_CTL_AEL)
@@ -449,10 +525,11 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
        /* Address of the first chip */
        writel(0, ispi->base + FADDR);
 
-       if (ispi->swseq)
-               ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
+       if (ispi->swseq_reg)
+               ret = intel_spi_sw_cycle(ispi, opcode, len,
+                                        OPTYPE_READ_NO_ADDR);
        else
-               ret = intel_spi_hw_cycle(ispi, opcode, buf, len);
+               ret = intel_spi_hw_cycle(ispi, opcode, len);
 
        if (ret)
                return ret;
@@ -467,10 +544,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
 
        /*
         * This is handled with atomic operation and preop code in Intel
-        * controller so skip it here now.
+        * controller so skip it here now. If the controller is not locked,
+        * program the opcode to the PREOP register for later use.
         */
-       if (opcode == SPINOR_OP_WREN)
+       if (opcode == SPINOR_OP_WREN) {
+               if (!ispi->locked)
+                       writel(opcode, ispi->sregs + PREOP_OPTYPE);
+
                return 0;
+       }
 
        writel(0, ispi->base + FADDR);
 
@@ -479,9 +561,10 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
        if (ret)
                return ret;
 
-       if (ispi->swseq)
-               return intel_spi_sw_cycle(ispi, opcode, buf, len);
-       return intel_spi_hw_cycle(ispi, opcode, buf, len);
+       if (ispi->swseq_reg)
+               return intel_spi_sw_cycle(ispi, opcode, len,
+                                         OPTYPE_WRITE_NO_ADDR);
+       return intel_spi_hw_cycle(ispi, opcode, len);
 }
 
 static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
@@ -561,12 +644,6 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
                val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
                val |= HSFSTS_CTL_FCYCLE_WRITE;
 
-               /* Write enable */
-               if (ispi->preopcodes[1] == SPINOR_OP_WREN)
-                       val |= SSFSTS_CTL_SPOP;
-               val |= SSFSTS_CTL_ACS;
-               writel(val, ispi->base + HSFSTS_CTL);
-
                ret = intel_spi_write_block(ispi, write_buf, block_size);
                if (ret) {
                        dev_err(ispi->dev, "failed to write block\n");
@@ -574,8 +651,8 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
                }
 
                /* Start the write now */
-               val = readl(ispi->base + HSFSTS_CTL);
-               writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);
+               val |= HSFSTS_CTL_FGO;
+               writel(val, ispi->base + HSFSTS_CTL);
 
                ret = intel_spi_wait_hw_busy(ispi);
                if (ret) {
@@ -620,6 +697,22 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
                erase_size = SZ_4K;
        }
 
+       if (ispi->swseq_erase) {
+               while (len > 0) {
+                       writel(offs, ispi->base + FADDR);
+
+                       ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
+                                                0, OPTYPE_WRITE_WITH_ADDR);
+                       if (ret)
+                               return ret;
+
+                       offs += erase_size;
+                       len -= erase_size;
+               }
+
+               return 0;
+       }
+
        while (len > 0) {
                writel(offs, ispi->base + FADDR);
 
@@ -652,7 +745,7 @@ static bool intel_spi_is_protected(const struct intel_spi *ispi,
 {
        int i;
 
-       for (i = 0; i < PR_NUM; i++) {
+       for (i = 0; i < ispi->pr_num; i++) {
                u32 pr_base, pr_limit, pr_value;
 
                pr_value = readl(ispi->pregs + PR(i));
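
For illustration (not in the patch, and assuming the corrected ERASE_64K_OPCODE_SHIFT in the mask definition above), the erase opcodes advertised for the hardware sequencer can be extracted from a VSCC register like this:

u32 lvscc = readl(ispi->base + LVSCC);
u8 erase_op     = (lvscc & ERASE_OPCODE_MASK) >> ERASE_OPCODE_SHIFT;
u8 erase_64k_op = (lvscc & ERASE_64K_OPCODE_MASK) >> ERASE_64K_OPCODE_SHIFT;

/* intel_spi_init() above only checks that both LVSCC and UVSCC carry a
 * non-zero erase opcode; if either is zero, erase falls back to the
 * software sequencer (swseq_erase = true).
 */
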
index c258c7adf1c5198a96c434e349bcd2f800415204..abe455ccd68be9cdf3cfe5f70f7c61be4d17c880 100644 (file)
@@ -404,6 +404,29 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
        return ret;
 }
 
+static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+{
+       clk_disable_unprepare(mt8173_nor->spi_clk);
+       clk_disable_unprepare(mt8173_nor->nor_clk);
+}
+
+static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+{
+       int ret;
+
+       ret = clk_prepare_enable(mt8173_nor->spi_clk);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(mt8173_nor->nor_clk);
+       if (ret) {
+               clk_disable_unprepare(mt8173_nor->spi_clk);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
                        struct device_node *flash_node)
 {
@@ -468,15 +491,11 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
                return PTR_ERR(mt8173_nor->nor_clk);
 
        mt8173_nor->dev = &pdev->dev;
-       ret = clk_prepare_enable(mt8173_nor->spi_clk);
+
+       ret = mt8173_nor_enable_clk(mt8173_nor);
        if (ret)
                return ret;
 
-       ret = clk_prepare_enable(mt8173_nor->nor_clk);
-       if (ret) {
-               clk_disable_unprepare(mt8173_nor->spi_clk);
-               return ret;
-       }
        /* only support one attached flash */
        flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
        if (!flash_np) {
@@ -487,10 +506,9 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
        ret = mtk_nor_init(mt8173_nor, flash_np);
 
 nor_free:
-       if (ret) {
-               clk_disable_unprepare(mt8173_nor->spi_clk);
-               clk_disable_unprepare(mt8173_nor->nor_clk);
-       }
+       if (ret)
+               mt8173_nor_disable_clk(mt8173_nor);
+
        return ret;
 }
 
@@ -498,11 +516,38 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
 {
        struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
 
-       clk_disable_unprepare(mt8173_nor->spi_clk);
-       clk_disable_unprepare(mt8173_nor->nor_clk);
+       mt8173_nor_disable_clk(mt8173_nor);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nor_suspend(struct device *dev)
+{
+       struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+       mt8173_nor_disable_clk(mt8173_nor);
+
        return 0;
 }
 
+static int mtk_nor_resume(struct device *dev)
+{
+       struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+       return mt8173_nor_enable_clk(mt8173_nor);
+}
+
+static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
+       .suspend = mtk_nor_suspend,
+       .resume = mtk_nor_resume,
+};
+
+#define MTK_NOR_DEV_PM_OPS     (&mtk_nor_dev_pm_ops)
+#else
+#define MTK_NOR_DEV_PM_OPS     NULL
+#endif
+
 static const struct of_device_id mtk_nor_of_ids[] = {
        { .compatible = "mediatek,mt8173-nor"},
        { /* sentinel */ }
@@ -514,6 +559,7 @@ static struct platform_driver mtk_nor_driver = {
        .remove = mtk_nor_drv_remove,
        .driver = {
                .name = "mtk-nor",
+               .pm = MTK_NOR_DEV_PM_OPS,
                .of_match_table = mtk_nor_of_ids,
        },
 };
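
An alternative formulation (a sketch only, not what the patch does) would drop the #ifdef CONFIG_PM_SLEEP block by marking the callbacks __maybe_unused and letting SIMPLE_DEV_PM_OPS() supply the dev_pm_ops:

static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
	mt8173_nor_disable_clk(dev_get_drvdata(dev));
	return 0;
}

static int __maybe_unused mtk_nor_resume(struct device *dev)
{
	return mt8173_nor_enable_clk(dev_get_drvdata(dev));
}

static SIMPLE_DEV_PM_OPS(mtk_nor_dev_pm_ops, mtk_nor_suspend, mtk_nor_resume);

/* and in the platform_driver: .pm = &mtk_nor_dev_pm_ops */
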
index 19c000722cbc86f3246c0b84055eca92db331424..bc266f70a15b319f003916bd08184c2f8829a092 100644 (file)
@@ -89,6 +89,8 @@ struct flash_info {
 #define NO_CHIP_ERASE          BIT(12) /* Chip does not support chip erase */
 #define SPI_NOR_SKIP_SFDP      BIT(13) /* Skip parsing of SFDP tables */
 #define USE_CLSR               BIT(14) /* use CLSR command */
+
+       int     (*quad_enable)(struct spi_nor *nor);
 };
 
 #define JEDEC_MFR(info)        ((info)->id[0])
@@ -870,6 +872,8 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
        return ret;
 }
 
+static int macronix_quad_enable(struct spi_nor *nor);
+
 /* Used when the "_ext_id" is two bytes at most */
 #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)     \
                .id = {                                                 \
@@ -964,6 +968,7 @@ static const struct flash_info spi_nor_ids[] = {
        { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
 
        /* Everspin */
+       { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
        { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
        { "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
        { "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -982,6 +987,11 @@ static const struct flash_info spi_nor_ids[] = {
                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
        },
+       {
+               "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+       },
        {
                "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
@@ -997,6 +1007,12 @@ static const struct flash_info spi_nor_ids[] = {
                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                        SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
        },
+       {
+               "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+                       .quad_enable = macronix_quad_enable,
+       },
 
        /* Intel/Numonyx -- xxxs33b */
        { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
@@ -1024,7 +1040,7 @@ static const struct flash_info spi_nor_ids[] = {
        { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
        { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
-       { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+       { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
        { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
        { "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
@@ -1137,6 +1153,11 @@ static const struct flash_info spi_nor_ids[] = {
        { "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) },
        { "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
        { "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
+       {
+               "w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+       },
        { "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
        { "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
        { "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
@@ -2288,8 +2309,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
 
        /* Check the SFDP header version. */
        if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
-           header.major != SFDP_JESD216_MAJOR ||
-           header.minor < SFDP_JESD216_MINOR)
+           header.major != SFDP_JESD216_MAJOR)
                return -EINVAL;
 
        /*
@@ -2427,6 +2447,15 @@ static int spi_nor_init_params(struct spi_nor *nor,
                        params->quad_enable = spansion_quad_enable;
                        break;
                }
+
+               /*
+                * Some manufacturers, such as GigaDevice, may use a different
+                * bit to set QE on different memories, so the MFR cannot
+                * indicate the quad_enable method for this case; we need to
+                * set it in the flash info list.
+                */
+               if (info->quad_enable)
+                       params->quad_enable = info->quad_enable;
        }
 
        /* Override the parameters with data read from SFDP tables. */
@@ -2630,17 +2659,60 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
        /* Enable Quad I/O if needed. */
        enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
                          spi_nor_get_protocol_width(nor->write_proto) == 4);
-       if (enable_quad_io && params->quad_enable) {
-               err = params->quad_enable(nor);
+       if (enable_quad_io && params->quad_enable)
+               nor->quad_enable = params->quad_enable;
+       else
+               nor->quad_enable = NULL;
+
+       return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+       int err;
+
+       /*
+        * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to
+        * power up with the software protection bits set.
+        */
+       if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+           JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+           JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+           nor->info->flags & SPI_NOR_HAS_LOCK) {
+               write_enable(nor);
+               write_sr(nor, 0);
+               spi_nor_wait_till_ready(nor);
+       }
+
+       if (nor->quad_enable) {
+               err = nor->quad_enable(nor);
                if (err) {
                        dev_err(nor->dev, "quad mode not supported\n");
                        return err;
                }
        }
 
+       if ((nor->addr_width == 4) &&
+           (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+           !(nor->info->flags & SPI_NOR_4B_OPCODES))
+               set_4byte(nor, nor->info, 1);
+
        return 0;
 }
 
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+       struct spi_nor *nor = mtd_to_spi_nor(mtd);
+       struct device *dev = nor->dev;
+       int ret;
+
+       /* re-initialize the nor chip */
+       ret = spi_nor_init(nor);
+       if (ret)
+               dev_err(dev, "resume() failed\n");
+}
+
 int spi_nor_scan(struct spi_nor *nor, const char *name,
                 const struct spi_nor_hwcaps *hwcaps)
 {
@@ -2708,20 +2780,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
        if (ret)
                return ret;
 
-       /*
-        * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
-        * with the software protection bits set
-        */
-
-       if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
-           JEDEC_MFR(info) == SNOR_MFR_INTEL ||
-           JEDEC_MFR(info) == SNOR_MFR_SST ||
-           info->flags & SPI_NOR_HAS_LOCK) {
-               write_enable(nor);
-               write_sr(nor, 0);
-               spi_nor_wait_till_ready(nor);
-       }
-
        if (!mtd->name)
                mtd->name = dev_name(dev);
        mtd->priv = nor;
@@ -2731,6 +2789,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
        mtd->size = params.size;
        mtd->_erase = spi_nor_erase;
        mtd->_read = spi_nor_read;
+       mtd->_resume = spi_nor_resume;
 
        /* NOR protection support for STmicro/Micron chips and similar */
        if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
@@ -2804,8 +2863,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
                if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
                    info->flags & SPI_NOR_4B_OPCODES)
                        spi_nor_set_4byte_opcodes(nor, info);
-               else
-                       set_4byte(nor, info, 1);
        } else {
                nor->addr_width = 3;
        }
@@ -2822,6 +2879,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
                        return ret;
        }
 
+       /* Send all the required SPI flash commands to initialize device */
+       nor->info = info;
+       ret = spi_nor_init(nor);
+       if (ret)
+               return ret;
+
        dev_info(dev, "%s (%lld Kbytes)\n", info->name,
                        (long long)mtd->size >> 10);
 
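As an illustration of the new per-flash quad_enable hook (a hypothetical entry, not part of the patch), a chip whose QE bit does not follow its manufacturer's default can now name its own routine directly in the ID table, exactly as the gd25q256 entry above does:

	{
		"example-nor", INFO(0x123456, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
			.quad_enable = macronix_quad_enable,
	},

spi_nor_init_params() then copies info->quad_enable over the manufacturer default before the SFDP data can override it, as the hunk above shows.
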
index 86c0931543c538c340421786db8bc4ef5d88a55c..b3c7f6addba79eed849dcb89d3c2547023c3b0e8 100644 (file)
@@ -1,9 +1,22 @@
 /*
- * stm32_quadspi.c
+ * Driver for stm32 quadspi controller
  *
- * Copyright (C) 2017, Ludovic Barre
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Ludovic Barre <ludovic.barre@st.com>.
  *
- * License terms: GNU General Public License (GPL), version 2
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/clk.h>
 #include <linux/errno.h>
 #define STM32_MAX_MMAP_SZ      SZ_256M
 #define STM32_MAX_NORCHIP      2
 
+#define STM32_QSPI_FIFO_SZ     32
 #define STM32_QSPI_FIFO_TIMEOUT_US 30000
 #define STM32_QSPI_BUSY_TIMEOUT_US 100000
 
@@ -124,6 +138,7 @@ struct stm32_qspi_flash {
        u32 presc;
        u32 read_mode;
        bool registered;
+       u32 prefetch_limit;
 };
 
 struct stm32_qspi {
@@ -240,12 +255,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
                                                 STM32_QSPI_FIFO_TIMEOUT_US);
                if (ret) {
                        dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
-                       break;
+                       return ret;
                }
                tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
        }
 
-       return ret;
+       return 0;
 }
 
 static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
@@ -272,6 +287,7 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
 {
        struct stm32_qspi *qspi = flash->qspi;
        u32 ccr, dcr, cr;
+       u32 last_byte;
        int err;
 
        err = stm32_qspi_wait_nobusy(qspi);
@@ -314,6 +330,10 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
                if (err)
                        goto abort;
                writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
+       } else {
+               last_byte = cmd->addr + cmd->len;
+               if (last_byte > flash->prefetch_limit)
+                       goto abort;
        }
 
        return err;
@@ -322,7 +342,9 @@ abort:
        cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
        writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
 
-       dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+       if (err)
+               dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+
        return err;
 }
 
@@ -550,6 +572,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
        }
 
        flash->fsize = FSIZE_VAL(mtd->size);
+       flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
 
        flash->read_mode = CCR_FMODE_MM;
        if (mtd->size > qspi->mm_size)
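
A worked example for the prefetch limit above, assuming a hypothetical 16 MiB part:

/* STM32_QSPI_FIFO_SZ = 32, so for a 16 MiB flash:
 *   prefetch_limit = 16 * 1024 * 1024 - 32 = 16777184
 * A memory-mapped access whose last byte (cmd->addr + cmd->len) would land
 * beyond this limit is aborted in stm32_qspi_send(), keeping the
 * controller's prefetcher from running past the end of the device.
 */
u32 prefetch_limit = SZ_16M - STM32_QSPI_FIFO_SZ;
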
index a1b33aa6054a8b8f5115f57990d9efd7034ccfd2..9697977b80f040c3fde59037e2caabe0cdfaa79f 100644 (file)
@@ -423,7 +423,7 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
                        return -EINVAL;
 
                bond_opt_initval(&newval,
-                                nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+                                nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
                err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
                if (err)
                        return err;
index fed75e75207a2472c341695b227e3daad738ada2..b8029ea03307f75322647be894bae56176da3aef 100644 (file)
@@ -66,9 +66,9 @@ static const struct cfhsi_config  hsi_default_config = {
 
 static LIST_HEAD(cfhsi_list);
 
-static void cfhsi_inactivity_tout(unsigned long arg)
+static void cfhsi_inactivity_tout(struct timer_list *t)
 {
-       struct cfhsi *cfhsi = (struct cfhsi *)arg;
+       struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
 
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);
@@ -737,9 +737,9 @@ out_of_sync:
        schedule_work(&cfhsi->out_of_sync_work);
 }
 
-static void cfhsi_rx_slowpath(unsigned long arg)
+static void cfhsi_rx_slowpath(struct timer_list *t)
 {
-       struct cfhsi *cfhsi = (struct cfhsi *)arg;
+       struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
 
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);
@@ -997,9 +997,9 @@ static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
        wake_up_interruptible(&cfhsi->wake_down_wait);
 }
 
-static void cfhsi_aggregation_tout(unsigned long arg)
+static void cfhsi_aggregation_tout(struct timer_list *t)
 {
-       struct cfhsi *cfhsi = (struct cfhsi *)arg;
+       struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
 
        netdev_dbg(cfhsi->ndev, "%s.\n",
                __func__);
@@ -1211,14 +1211,11 @@ static int cfhsi_open(struct net_device *ndev)
        init_waitqueue_head(&cfhsi->flush_fifo_wait);
 
        /* Setup the inactivity timer. */
-       setup_timer(&cfhsi->inactivity_timer, cfhsi_inactivity_tout,
-                   (unsigned long)cfhsi);
+       timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
        /* Setup the slowpath RX timer. */
-       setup_timer(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath,
-                   (unsigned long)cfhsi);
+       timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
        /* Setup the aggregation timer. */
-       setup_timer(&cfhsi->aggregation_timer, cfhsi_aggregation_tout,
-                   (unsigned long)cfhsi);
+       timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
 
        /* Activate HSI interface. */
        res = cfhsi->ops->cfhsi_up(cfhsi->ops);
index b6e2bfd7d2d6aae5f05794c71f818e14a844a6b4..8b1a859f5140c3ebbd9bd5e190112a7835afbe2c 100644 (file)
@@ -165,9 +165,16 @@ static unsigned int network_rec_config_shadow = 0;
 
 static unsigned int network_tr_ctrl_shadow = 0;
 
+/* Timers */
+static void e100_check_speed(struct timer_list *unused);
+static void e100_clear_network_leds(struct timer_list *unused);
+static void e100_check_duplex(struct timer_list *unused);
+static DEFINE_TIMER(speed_timer, e100_check_speed);
+static DEFINE_TIMER(clear_led_timer, e100_clear_network_leds);
+static DEFINE_TIMER(duplex_timer, e100_check_duplex);
+static struct net_device *timer_dev;
+
 /* Network speed indication. */
-static DEFINE_TIMER(speed_timer, NULL);
-static DEFINE_TIMER(clear_led_timer, NULL);
 static int current_speed; /* Speed read from transceiver */
 static int current_speed_selection; /* Speed selected by user */
 static unsigned long led_next_time;
@@ -175,7 +182,6 @@ static int led_active;
 static int rx_queue_len;
 
 /* Duplex */
-static DEFINE_TIMER(duplex_timer, NULL);
 static int full_duplex;
 static enum duplex current_duplex;
 
@@ -200,9 +206,7 @@ static void update_rx_stats(struct net_device_stats *);
 static void update_tx_stats(struct net_device_stats *);
 static int e100_probe_transceiver(struct net_device* dev);
 
-static void e100_check_speed(unsigned long priv);
 static void e100_set_speed(struct net_device* dev, unsigned long speed);
-static void e100_check_duplex(unsigned long priv);
 static void e100_set_duplex(struct net_device* dev, enum duplex);
 static void e100_negotiate(struct net_device* dev);
 
@@ -214,7 +218,6 @@ static void e100_send_mdio_bit(unsigned char bit);
 static unsigned char e100_receive_mdio_bit(void);
 static void e100_reset_transceiver(struct net_device* net);
 
-static void e100_clear_network_leds(unsigned long dummy);
 static void e100_set_network_leds(int active);
 
 static const struct ethtool_ops e100_ethtool_ops;
@@ -381,17 +384,12 @@ etrax_ethernet_init(void)
        current_speed = 10;
        current_speed_selection = 0; /* Auto */
        speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
-       speed_timer.data = (unsigned long)dev;
-       speed_timer.function = e100_check_speed;
-
-       clear_led_timer.function = e100_clear_network_leds;
-       clear_led_timer.data = (unsigned long)dev;
 
        full_duplex = 0;
        current_duplex = autoneg;
        duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
-        duplex_timer.data = (unsigned long)dev;
-       duplex_timer.function = e100_check_duplex;
+
+       timer_dev = dev;
 
         /* Initialize mii interface */
        np->mii_if.phy_id_mask = 0x1f;
@@ -680,9 +678,9 @@ intel_check_speed(struct net_device* dev)
 }
 #endif
 static void
-e100_check_speed(unsigned long priv)
+e100_check_speed(struct timer_list *unused)
 {
-       struct net_device* dev = (struct net_device*)priv;
+       struct net_device* dev = timer_dev;
        struct net_local *np = netdev_priv(dev);
        static int led_initiated = 0;
        unsigned long data;
@@ -799,9 +797,9 @@ e100_set_speed(struct net_device* dev, unsigned long speed)
 }
 
 static void
-e100_check_duplex(unsigned long priv)
+e100_check_duplex(struct timer_list *unused)
 {
-       struct net_device *dev = (struct net_device *)priv;
+       struct net_device *dev = timer_dev;
        struct net_local *np = netdev_priv(dev);
        int old_duplex;
 
@@ -1669,9 +1667,9 @@ e100_hardware_send_packet(struct net_local *np, char *buf, int length)
 }
 
 static void
-e100_clear_network_leds(unsigned long dummy)
+e100_clear_network_leds(struct timer_list *unused)
 {
-       struct net_device *dev = (struct net_device *)dummy;
+       struct net_device *dev = timer_dev;
        struct net_local *np = netdev_priv(dev);
 
        spin_lock(&np->led_lock);
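
The cris e100 conversion above cannot use from_timer() because its timers are statically defined rather than embedded in a per-device structure, hence the single file-scope timer_dev pointer; a sketch of that pattern with hypothetical names:

#include <linux/timer.h>
#include <linux/netdevice.h>

static struct net_device *my_timer_dev;	/* only one device is supported */

static void my_link_check(struct timer_list *unused)
{
	struct net_device *dev = my_timer_dev;

	/* ... poll the link state for dev, then re-arm if needed ... */
}
static DEFINE_TIMER(my_link_timer, my_link_check);
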
index 93faa1fed6f266017415753fbe383227f22e4a96..ea01f24f15e77f4b9765d3bed76ce71527e39dad 100644 (file)
@@ -95,7 +95,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
        reg = reg_readl(priv, REG_SPHY_CNTRL);
        if (enable) {
                reg |= PHY_RESET;
-               reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
+               reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
                reg_writel(priv, reg, REG_SPHY_CNTRL);
                udelay(21);
                reg = reg_readl(priv, REG_SPHY_CNTRL);
index 436668bd50dc84dc72f587402348224fe2d9021e..46af8052e535361e7d73ce62f4e6a017c1352bbc 100644 (file)
@@ -149,9 +149,9 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
        mutex_unlock(&chip->reg_lock);
 }
 
-static void mv88e6xxx_phy_ppu_reenable_timer(unsigned long _ps)
+static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
 {
-       struct mv88e6xxx_chip *chip = (void *)_ps;
+       struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
 
        schedule_work(&chip->ppu_work);
 }
@@ -193,8 +193,7 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
 {
        mutex_init(&chip->ppu_mutex);
        INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
-       setup_timer(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer,
-                   (unsigned long)chip);
+       timer_setup(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer, 0);
 }
 
 static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
index fccce4b477782ae7d41b927e9a51bf7d79a74774..74263f8efe1a622d86af9a9b79f8d965a5870b17 100644 (file)
@@ -139,9 +139,9 @@ static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
 
 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
 
-static void eql_timer(unsigned long param)
+static void eql_timer(struct timer_list *t)
 {
-       equalizer_t *eql = (equalizer_t *) param;
+       equalizer_t *eql = from_timer(eql, t, timer);
        struct list_head *this, *tmp, *head;
 
        spin_lock(&eql->queue.lock);
@@ -178,7 +178,7 @@ static void __init eql_setup(struct net_device *dev)
 {
        equalizer_t *eql = netdev_priv(dev);
 
-       setup_timer(&eql->timer, eql_timer, (unsigned long)eql);
+       timer_setup(&eql->timer, eql_timer, 0);
        eql->timer.expires      = jiffies + EQL_DEFAULT_RESCHED_IVAL;
 
        spin_lock_init(&eql->queue.lock);
index 0658cde1586a3fe50a7f0cb22439bd9a5a659d40..7120f2b9c6efa486040ceebf89ed4adeda6fa28e 100644 (file)
@@ -1092,9 +1092,11 @@ static void tx_reclaim_skb(struct bfin_mac_local *lp)
        return;
 }
 
-static void tx_reclaim_skb_timeout(unsigned long lp)
+static void tx_reclaim_skb_timeout(struct timer_list *t)
 {
-       tx_reclaim_skb((struct bfin_mac_local *)lp);
+       struct bfin_mac_local *lp = from_timer(lp, t, tx_reclaim_timer);
+
+       tx_reclaim_skb(lp);
 }
 
 static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
@@ -1650,8 +1652,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
        ndev->netdev_ops = &bfin_mac_netdev_ops;
        ndev->ethtool_ops = &bfin_mac_ethtool_ops;
 
-       setup_timer(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout,
-                   (unsigned long)lp);
+       timer_setup(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, 0);
 
        lp->flags = 0;
        netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
index 658e92f79d36b1adb7cc192f5d9fe129ae7ec8cd..48220b6c600d38cd0d84b68d73ffb75b8ad10b01 100644 (file)
@@ -3080,9 +3080,9 @@ err_out:
  * The routine called when the error timer expires, to track the number of
  * recurring errors.
  */
-static void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(struct timer_list *t)
 {
-       struct et131x_adapter *adapter = (struct et131x_adapter *)data;
+       struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
        struct phy_device *phydev = adapter->netdev->phydev;
 
        if (et1310_in_phy_coma(adapter)) {
@@ -3624,8 +3624,7 @@ static int et131x_open(struct net_device *netdev)
        int result;
 
        /* Start the timer to track NIC errors */
-       setup_timer(&adapter->error_timer, et131x_error_timer_handler,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
        adapter->error_timer.expires = jiffies +
                msecs_to_jiffies(TX_ERROR_PERIOD);
        add_timer(&adapter->error_timer);
index 7451922c209dc2e3ccc8c2d21a45063c938eb3fd..97c5a89a9cf7a4f1a65dbef70d889efd7d661c20 100644 (file)
@@ -2579,6 +2579,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
        bool wd_state;
        int rc;
 
+       set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
        rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
        if (rc) {
                dev_err(&pdev->dev, "Can not initialize device\n");
@@ -2592,6 +2593,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
                goto err_device_destroy;
        }
 
+       clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+       /* Make sure we don't have a race with AENQ Links state handler */
+       if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+               netif_carrier_on(adapter->netdev);
+
        rc = ena_enable_msix_and_set_admin_interrupts(adapter,
                                                      adapter->num_queues);
        if (rc) {
@@ -2618,7 +2624,7 @@ err_device_destroy:
        ena_com_admin_destroy(ena_dev);
 err:
        clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
-
+       clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
        dev_err(&pdev->dev,
                "Reset attempt failed. Can not reset the device\n");
 
@@ -2853,9 +2859,9 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
                (netdev->features & GENMASK_ULL(63, 32)) >> 32;
 }
 
-static void ena_timer_service(unsigned long data)
+static void ena_timer_service(struct timer_list *t)
 {
-       struct ena_adapter *adapter = (struct ena_adapter *)data;
+       struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
        u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
        struct ena_admin_host_info *host_info =
                adapter->ena_dev->host_attr.host_info;
@@ -3272,8 +3278,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        ena_update_hints(adapter, &get_feat_ctx.hw_hints);
 
-       setup_timer(&adapter->timer_service, ena_timer_service,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->timer_service, ena_timer_service, 0);
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
 
        dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
@@ -3495,7 +3500,8 @@ static void ena_update_on_link_change(void *adapter_data,
        if (status) {
                netdev_dbg(adapter->netdev, "%s\n", __func__);
                set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
-               netif_carrier_on(adapter->netdev);
+               if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
+                       netif_carrier_on(adapter->netdev);
        } else {
                clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
                netif_carrier_off(adapter->netdev);
index ed8bd0a579c4a2e8b93726a19642f31f7d21cc5f..3bbc003871de401baa30cc47439969b0f49d732b 100644 (file)
@@ -272,7 +272,8 @@ enum ena_flags_t {
        ENA_FLAG_DEV_UP,
        ENA_FLAG_LINK_UP,
        ENA_FLAG_MSIX_ENABLED,
-       ENA_FLAG_TRIGGER_RESET
+       ENA_FLAG_TRIGGER_RESET,
+       ENA_FLAG_ONGOING_RESET
 };
 
 /* adapter specific private data structure */
index 483e97691eeae2de4604e49cdb8fd8d60fb0dda4..78dfb2ab78cefa0586168a0d043348c3488f0f20 100644 (file)
@@ -163,9 +163,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
        return 0;
 }
 
-static void aq_nic_service_timer_cb(unsigned long param)
+static void aq_nic_service_timer_cb(struct timer_list *t)
 {
-       struct aq_nic_s *self = (struct aq_nic_s *)param;
+       struct aq_nic_s *self = from_timer(self, t, service_timer);
        struct net_device *ndev = aq_nic_get_ndev(self);
        int err = 0;
        unsigned int i = 0U;
@@ -201,9 +201,9 @@ err_exit:
                  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
 }
 
-static void aq_nic_polling_timer_cb(unsigned long param)
+static void aq_nic_polling_timer_cb(struct timer_list *t)
 {
-       struct aq_nic_s *self = (struct aq_nic_s *)param;
+       struct aq_nic_s *self = from_timer(self, t, polling_timer);
        struct aq_vec_s *aq_vec = NULL;
        unsigned int i = 0U;
 
@@ -440,14 +440,12 @@ int aq_nic_start(struct aq_nic_s *self)
        err = aq_nic_update_interrupt_moderation_settings(self);
        if (err)
                goto err_exit;
-       setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
-                   (unsigned long)self);
+       timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
        mod_timer(&self->service_timer, jiffies +
                        AQ_CFG_SERVICE_TIMER_INTERVAL);
 
        if (self->aq_nic_cfg.is_polling) {
-               setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
-                           (unsigned long)self);
+               timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
                mod_timer(&self->polling_timer, jiffies +
                          AQ_CFG_POLLING_TIMER_INTERVAL);
        } else {
index 8c9986f3fc0186701bd9ae81f27cbb1519e9f25f..94270f654b3b534b88ed3296f7556de0186de123 100644 (file)
@@ -222,9 +222,10 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
  * atl1c_phy_config - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl1c_phy_config(unsigned long data)
+static void atl1c_phy_config(struct timer_list *t)
 {
-       struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
+       struct atl1c_adapter *adapter = from_timer(adapter, t,
+                                                  phy_config_timer);
        struct atl1c_hw *hw = &adapter->hw;
        unsigned long flags;
 
@@ -2613,8 +2614,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->mii.phy_id_mask = 0x1f;
        adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
        netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
-       setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
-                       (unsigned long)adapter);
+       timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
        /* setup the private structure */
        err = atl1c_sw_init(adapter);
        if (err) {
index 4f7e195af0bc6dff79687547b9979375b35a17d6..9dc6da039a6d90ac4137a70e94b2c3213c2a4741 100644 (file)
@@ -130,9 +130,10 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
  * atl1e_phy_config - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl1e_phy_config(unsigned long data)
+static void atl1e_phy_config(struct timer_list *t)
 {
-       struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
+       struct atl1e_adapter *adapter = from_timer(adapter, t,
+                                                  phy_config_timer);
        struct atl1e_hw *hw = &adapter->hw;
        unsigned long flags;
 
@@ -2361,8 +2362,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
 
-       setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
 
        /* get user settings */
        atl1e_check_options(adapter);
index 83d2db2abb45535c43f5921ab75791f9869ed4c0..b81fbf119bce314a9e2f282672dccd9712139eaa 100644 (file)
@@ -2575,9 +2575,10 @@ static irqreturn_t atl1_intr(int irq, void *data)
  * atl1_phy_config - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl1_phy_config(unsigned long data)
+static void atl1_phy_config(struct timer_list *t)
 {
-       struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+       struct atl1_adapter *adapter = from_timer(adapter, t,
+                                                 phy_config_timer);
        struct atl1_hw *hw = &adapter->hw;
        unsigned long flags;
 
@@ -3071,8 +3072,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* assume we have no link for now */
        netif_carrier_off(netdev);
 
-       setup_timer(&adapter->phy_config_timer, atl1_phy_config,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0);
        adapter->phy_timer_pending = false;
 
        INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
index 77a1c03255defa77f2c662650d41a1ffc68eb7bb..db4bcc51023adf546ed0e8a9c8d075beec048faf 100644 (file)
@@ -1028,9 +1028,9 @@ static void atl2_tx_timeout(struct net_device *netdev)
  * atl2_watchdog - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl2_watchdog(unsigned long data)
+static void atl2_watchdog(struct timer_list *t)
 {
-       struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+       struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
 
        if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
                u32 drop_rxd, drop_rxs;
@@ -1053,9 +1053,10 @@ static void atl2_watchdog(unsigned long data)
  * atl2_phy_config - Timer Call-back
  * @data: pointer to netdev cast into an unsigned long
  */
-static void atl2_phy_config(unsigned long data)
+static void atl2_phy_config(struct timer_list *t)
 {
-       struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+       struct atl2_adapter *adapter = from_timer(adapter, t,
+                                                 phy_config_timer);
        struct atl2_hw *hw = &adapter->hw;
        unsigned long flags;
 
@@ -1434,11 +1435,9 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        atl2_check_options(adapter);
 
-       setup_timer(&adapter->watchdog_timer, atl2_watchdog,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->watchdog_timer, atl2_watchdog, 0);
 
-       setup_timer(&adapter->phy_config_timer, atl2_phy_config,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->phy_config_timer, atl2_phy_config, 0);
 
        INIT_WORK(&adapter->reset_task, atl2_reset_task);
        INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
index 42e44fc03a181b7564f1ff16ce05a545e135208d..e445ab724827f8d3c7c3770748902c08b76f8e13 100644 (file)
@@ -599,9 +599,9 @@ static void b44_check_phy(struct b44 *bp)
        }
 }
 
-static void b44_timer(unsigned long __opaque)
+static void b44_timer(struct timer_list *t)
 {
-       struct b44 *bp = (struct b44 *) __opaque;
+       struct b44 *bp = from_timer(bp, t, timer);
 
        spin_lock_irq(&bp->lock);
 
@@ -1474,7 +1474,7 @@ static int b44_open(struct net_device *dev)
                goto out;
        }
 
-       setup_timer(&bp->timer, b44_timer, (unsigned long)bp);
+       timer_setup(&bp->timer, b44_timer, 0);
        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
 
index b3055a76dfbf5275a2b4dc6b864e5aa9704f913d..7919f6112ecf95786e9f9a180fa248ef258428a5 100644 (file)
@@ -6183,9 +6183,9 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
 }
 
 static void
-bnx2_timer(unsigned long data)
+bnx2_timer(struct timer_list *t)
 {
-       struct bnx2 *bp = (struct bnx2 *) data;
+       struct bnx2 *bp = from_timer(bp, t, timer);
 
        if (!netif_running(bp->dev))
                return;
@@ -8462,7 +8462,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bnx2_set_default_link(bp);
        bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 
-       setup_timer(&bp->timer, bnx2_timer, (unsigned long)bp);
+       timer_setup(&bp->timer, bnx2_timer, 0);
        bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
 
 #ifdef BCM_CNIC
index be9fd7d184d0a00acfcc3a2e5b4d06a52d2a761d..91e2a7560b48d572d26e8566c9a3b0667083d45d 100644 (file)
@@ -5761,9 +5761,9 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
                 bp->fw_drv_pulse_wr_seq);
 }
 
-static void bnx2x_timer(unsigned long data)
+static void bnx2x_timer(struct timer_list *t)
 {
-       struct bnx2x *bp = (struct bnx2x *) data;
+       struct bnx2x *bp = from_timer(bp, t, timer);
 
        if (!netif_running(bp->dev))
                return;
@@ -12421,7 +12421,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
 
-       setup_timer(&bp->timer, bnx2x_timer, (unsigned long)bp);
+       timer_setup(&bp->timer, bnx2x_timer, 0);
        bp->timer.expires = jiffies + bp->current_interval;
 
        if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
index 33c49ad697e47360a83abcfeb1a7c1dae008da5f..c5c38d4b7d1ccd04044f972777c5d3844e755a02 100644 (file)
@@ -6962,9 +6962,9 @@ static void bnxt_poll_controller(struct net_device *dev)
 }
 #endif
 
-static void bnxt_timer(unsigned long data)
+static void bnxt_timer(struct timer_list *t)
 {
-       struct bnxt *bp = (struct bnxt *)data;
+       struct bnxt *bp = from_timer(bp, t, timer);
        struct net_device *dev = bp->dev;
 
        if (!netif_running(dev))
@@ -7236,7 +7236,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        bnxt_init_dflt_coal(bp);
 
-       setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp);
+       timer_setup(&bp->timer, bnxt_timer, 0);
        bp->current_interval = BNXT_TIMER_INTERVAL;
 
        clear_bit(BNXT_STATE_OPEN, &bp->state);
index 7ce1d4b7e67de7a3be3229295d1bf06877585c03..b13ce5ebde8d9ce4cda2ed690a53a58d624b73f9 100644 (file)
@@ -2136,8 +2136,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
        /* Read A2 portion of the EEPROM */
        if (length) {
                start -= ETH_MODULE_SFF_8436_LEN;
-               bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
-                                                length, data);
+               rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+                                                     start, length, data);
        }
        return rc;
 }
index d8d5f207c759fb51b970359cb52fba32dcbda638..de51c2177d03b3cf9e4653226bd8901b1d29834e 100644 (file)
@@ -10931,9 +10931,9 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
        }
 }
 
-static void tg3_timer(unsigned long __opaque)
+static void tg3_timer(struct timer_list *t)
 {
-       struct tg3 *tp = (struct tg3 *) __opaque;
+       struct tg3 *tp = from_timer(tp, t, timer);
 
        spin_lock(&tp->lock);
 
@@ -11087,7 +11087,7 @@ static void tg3_timer_init(struct tg3 *tp)
        tp->asf_multiplier = (HZ / tp->timer_offset) *
                             TG3_FW_UPDATE_FREQ_SEC;
 
-       setup_timer(&tp->timer, tg3_timer, (unsigned long)tp);
+       timer_setup(&tp->timer, tg3_timer, 0);
 }
 
 static void tg3_timer_start(struct tg3 *tp)
index d4496e9afcdf37d12043a9db6beb2dbe76e6daa6..8b2c31e2a2b0281d6ca8c70bbf3a520bdd15eb31 100644 (file)
@@ -1355,7 +1355,6 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 
        /* Offload checksum calculation to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               hdr->csum_l3 = 1; /* Enable IP csum calculation */
                hdr->l3_offset = skb_network_offset(skb);
                hdr->l4_offset = skb_transport_offset(skb);
 
index 8dc21c9f97168e1a0a12bcf2c0d6b9ea224fa42f..973c1fb70d09929f92fc47db0e3d60e3146eaff0 100644 (file)
@@ -123,9 +123,9 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
 }
 
 #ifdef CONFIG_RFS_ACCEL
-void enic_flow_may_expire(unsigned long data)
+void enic_flow_may_expire(struct timer_list *t)
 {
-       struct enic *enic = (struct enic *)data;
+       struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
        bool res;
        int j;
 
index 0ae83e091a629d6f6aae135560fc07dcee129633..8c4ce50da6e1f43d7faf5ab5cbed27b472561572 100644 (file)
@@ -16,12 +16,11 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id);
 #ifdef CONFIG_RFS_ACCEL
 int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id);
-void enic_flow_may_expire(unsigned long data);
+void enic_flow_may_expire(struct timer_list *t);
 
 static inline void enic_rfs_timer_start(struct enic *enic)
 {
-       setup_timer(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire,
-                   (unsigned long)enic);
+       timer_setup(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 0);
        mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
 }
 
index 4a11baffe02d931fc54a9b9a0bc8d56e20dd800a..e130fb757e7bbeffeb78ac8139d84f1f1c688440 100644 (file)
@@ -1676,9 +1676,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-static void enic_notify_timer(unsigned long data)
+static void enic_notify_timer(struct timer_list *t)
 {
-       struct enic *enic = (struct enic *)data;
+       struct enic *enic = from_timer(enic, t, notify_timer);
 
        enic_notify_check(enic);
 
@@ -2846,8 +2846,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Setup notification timer, HW reset task, and wq locks
         */
 
-       setup_timer(&enic->notify_timer, enic_notify_timer,
-                   (unsigned long)enic);
+       timer_setup(&enic->notify_timer, enic_notify_timer, 0);
 
        enic_set_rx_coal_setting(enic);
        INIT_WORK(&enic->reset, enic_reset);
index 23053919ebf5fb2744a1e3132f8cf0b0c3c13c91..ae55da60ed0ed1ece830bd515d26a932fca8150e 100644 (file)
@@ -257,8 +257,8 @@ enum rx_desc_status_bits {
        RXFSD = 0x00000800,     /* first descriptor */
        RXLSD = 0x00000400,     /* last descriptor */
        ErrorSummary = 0x80,    /* error summary */
-       RUNT = 0x40,            /* runt packet received */
-       LONG = 0x20,            /* long packet received */
+       RUNTPKT = 0x40,         /* runt packet received */
+       LONGPKT = 0x20,         /* long packet received */
        FAE = 0x10,             /* frame align error */
        CRC = 0x08,             /* crc error */
        RXER = 0x04,            /* receive error */
@@ -1628,7 +1628,7 @@ static int netdev_rx(struct net_device *dev)
                                               dev->name, rx_status);
 
                                dev->stats.rx_errors++; /* end of a packet. */
-                               if (rx_status & (LONG | RUNT))
+                               if (rx_status & (LONGPKT | RUNTPKT))
                                        dev->stats.rx_length_errors++;
                                if (rx_status & RXER)
                                        dev->stats.rx_frame_errors++;
index 04aaacbc3d451bde70e4f358e648596e5ea3bdd5..1dc4aef37d3a4faf3592ab0dede92b802a831824 100644 (file)
@@ -849,7 +849,6 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 {
        struct device *dev = &adapter->vdev->dev;
        union ibmvnic_crq crq;
-       dma_addr_t dma_addr;
        int len = 0;
 
        if (adapter->vpd->buff)
@@ -879,7 +878,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
        adapter->vpd->dma_addr =
                dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
                               DMA_FROM_DEVICE);
-       if (dma_mapping_error(dev, dma_addr)) {
+       if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
                dev_err(dev, "Could not map VPD buffer\n");
                kfree(adapter->vpd->buff);
                return -ENOMEM;
index 8172cf08cc330eb9d4cc61044513a5aed79825e9..3bac9df1c09942a0806681db5afe64d8110f1ecb 100644 (file)
@@ -4307,8 +4307,10 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
 
        rar_num = E1000_RAR_ENTRIES;
 
-       /* Zero out the other 15 receive addresses. */
-       e_dbg("Clearing RAR[1-15]\n");
+       /* Zero out the following 14 receive addresses. RAR[15] is for
+        * manageability
+        */
+       e_dbg("Clearing RAR[1-14]\n");
        for (i = 1; i < rar_num; i++) {
                E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
                E1000_WRITE_FLUSH();
index 67163ca898ba2abca4e4e66eecdf76a5f5c3984c..00a36df02a3fd917e40989577af5b43f4bd064f5 100644 (file)
 #define NVM_SIZE_MULTIPLIER 4096       /*multiplier for NVMS field */
 #define E1000_FLASH_BASE_ADDR 0xE000   /*offset of NVM access regs */
 #define E1000_CTRL_EXT_NVMVS 0x3       /*NVM valid sector */
-#define E1000_TARC0_CB_MULTIQ_3_REQ    (1 << 28 | 1 << 29)
+#define E1000_TARC0_CB_MULTIQ_3_REQ    0x30000000
+#define E1000_TARC0_CB_MULTIQ_2_REQ    0x20000000
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES  7
index f2f49239b01509e120a292605ef4e0e9ef21c248..9f18d39bdc8f7a7d3c8cb8e9105f9632054f5e7f 100644 (file)
@@ -3034,9 +3034,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
                ew32(IOSFPC, reg_val);
 
                reg_val = er32(TARC(0));
-               /* SPT and KBL Si errata workaround to avoid Tx hang */
-               reg_val &= ~BIT(28);
-               reg_val |= BIT(29);
+               /* SPT and KBL Si errata workaround to avoid Tx hang.
+                * Dropping the number of outstanding requests from
+                * 3 to 2 in order to avoid a buffer overrun.
+                */
+               reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+               reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
                ew32(TARC(0), reg_val);
        }
 }
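A quick check of the bit arithmetic behind the two e1000e hunks above, written as a comment since the masks come straight from the diff:

        /*
         * 0x30000000 == BIT(28) | BIT(29)   (three outstanding requests)
         * 0x20000000 == BIT(29)             (two outstanding requests)
         *
         * so (reg & ~E1000_TARC0_CB_MULTIQ_3_REQ) | E1000_TARC0_CB_MULTIQ_2_REQ
         * leaves only bit 29 set, the same register state the old
         * "reg_val &= ~BIT(28); reg_val |= BIT(29);" sequence produced.
         */
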
index dbd69310f263ea5e4815a020bd1ec8a6113a85e5..538b42d5c1874870f865b7b217a328fb04e2b252 100644 (file)
@@ -1231,7 +1231,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               read_barrier_depends();
+               smp_rmb();
 
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
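This read_barrier_depends() to smp_rmb() replacement, and the matching ones in the i40e, i40evf, igb, igbvf, ixgbe and ixgbevf hunks further down, all sit in the same Tx-clean loop: the pointer to the end-of-packet descriptor is loaded first, and only then are the descriptor contents examined, so the two reads must stay ordered on weakly ordered CPUs. A hedged sketch of that ordering with illustrative names:

        /* next_to_watch is published by the xmit path once the descriptor
         * has been fully written
         */
        eop_desc = READ_ONCE(tx_buf->next_to_watch);
        if (!eop_desc)
                break;

        /* order the descriptor reads below after the pointer read above */
        smp_rmb();

        /* DD (descriptor done) not set: the hardware has not finished yet */
        if (!(eop_desc->wb.status & cpu_to_le32(TXD_STAT_DD)))
                break;
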
index 5829715fa34222ccfe4cbdb2c9366fa6f379cad2..e019baa905c569de929c5b67bb984d8a600f75dc 100644 (file)
@@ -90,7 +90,6 @@
 #define I40E_AQ_LEN                    256
 #define I40E_AQ_WORK_LIMIT             66 /* max number of VFs + a little */
 #define I40E_MAX_USER_PRIORITY         8
-#define I40E_MAX_QUEUES_PER_CH         64
 #define I40E_DEFAULT_TRAFFIC_CLASS     BIT(0)
 #define I40E_DEFAULT_MSG_ENABLE                4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT    10
index 9dcb2a961197b1f5c8919959d9661c432f03ad13..9af74253c3f7203bb00caac106e99b798f7b658b 100644 (file)
@@ -613,6 +613,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
                hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
        }
 
+       /* Newer versions of firmware require lock when reading the NVM */
+       if (hw->aq.api_maj_ver > 1 ||
+           (hw->aq.api_maj_ver == 1 &&
+            hw->aq.api_min_ver >= 5))
+               hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
        /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
        if (hw->aq.api_maj_ver > 1 ||
            (hw->aq.api_maj_ver == 1 &&
index 0203665cb53c105329fbe56d86b6ea16273b26e3..095965f268bd3508169a631ef5c3b1e88916c48d 100644 (file)
@@ -948,7 +948,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
                hw->pf_id = (u8)(func_rid & 0x7);
 
        if (hw->mac.type == I40E_MAC_X722)
-               hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+               hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+                            I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
 
        status = i40e_init_nvm(hw);
        return status;
@@ -1268,6 +1269,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
         * we don't need to do the PF Reset
         */
        if (!cnt) {
+               u32 reg2 = 0;
                if (hw->revision_id == 0)
                        cnt = I40E_PF_RESET_WAIT_COUNT_A0;
                else
@@ -1279,6 +1281,12 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
                        reg = rd32(hw, I40E_PFGEN_CTRL);
                        if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
                                break;
+                       reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+                       if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+                               hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n");
+                               hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+                               return I40E_ERR_NOT_READY;
+                       }
                        usleep_range(1000, 2000);
                }
                if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
index 4a964d6e4a9ebcdb7b55b157bb9b6006a5fd2aa8..321d8be80871ce2fc4a5ab39c3189dad2370c988 100644 (file)
@@ -2166,6 +2166,73 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
        return aq_ret;
 }
 
+/**
+ * i40e_set_promiscuous - set promiscuous mode
+ * @pf: board private structure
+ * @promisc: promisc on or off
+ *
+ * There are different ways of setting promiscuous mode on a PF depending on
+ * what state/environment we're in.  This identifies and sets it appropriately.
+ * Returns 0 on success.
+ **/
+static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status aq_ret;
+
+       if (vsi->type == I40E_VSI_MAIN &&
+           pf->lan_veb != I40E_NO_VEB &&
+           !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+               /* set defport ON for Main VSI instead of true promisc
+                * this way we will get all unicast/multicast and VLAN
+                * promisc behavior but will not get VF or VMDq traffic
+                * replicated on the Main VSI.
+                */
+               if (promisc)
+                       aq_ret = i40e_aq_set_default_vsi(hw,
+                                                        vsi->seid,
+                                                        NULL);
+               else
+                       aq_ret = i40e_aq_clear_default_vsi(hw,
+                                                          vsi->seid,
+                                                          NULL);
+               if (aq_ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Set default VSI failed, err %s, aq_err %s\n",
+                                i40e_stat_str(hw, aq_ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
+               }
+       } else {
+               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
+                                                 hw,
+                                                 vsi->seid,
+                                                 promisc, NULL,
+                                                 true);
+               if (aq_ret) {
+                       dev_info(&pf->pdev->dev,
+                                "set unicast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(hw, aq_ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
+               }
+               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+                                                 hw,
+                                                 vsi->seid,
+                                                 promisc, NULL);
+               if (aq_ret) {
+                       dev_info(&pf->pdev->dev,
+                                "set multicast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(hw, aq_ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
+               }
+       }
+
+       if (!aq_ret)
+               pf->cur_promisc = promisc;
+
+       return aq_ret;
+}
+
 /**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
@@ -2467,81 +2534,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_VSI_OVERFLOW_PROMISC,
                                        vsi->state));
-               if ((vsi->type == I40E_VSI_MAIN) &&
-                   (pf->lan_veb != I40E_NO_VEB) &&
-                   !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
-                       /* set defport ON for Main VSI instead of true promisc
-                        * this way we will get all unicast/multicast and VLAN
-                        * promisc behavior but will not get VF or VMDq traffic
-                        * replicated on the Main VSI.
-                        */
-                       if (pf->cur_promisc != cur_promisc) {
-                               pf->cur_promisc = cur_promisc;
-                               if (cur_promisc)
-                                       aq_ret =
-                                             i40e_aq_set_default_vsi(hw,
-                                                                     vsi->seid,
-                                                                     NULL);
-                               else
-                                       aq_ret =
-                                           i40e_aq_clear_default_vsi(hw,
-                                                                     vsi->seid,
-                                                                     NULL);
-                               if (aq_ret) {
-                                       retval = i40e_aq_rc_to_posix(aq_ret,
-                                                       hw->aq.asq_last_status);
-                                       dev_info(&pf->pdev->dev,
-                                                "Set default VSI failed on %s, err %s, aq_err %s\n",
-                                                vsi_name,
-                                                i40e_stat_str(hw, aq_ret),
-                                                i40e_aq_str(hw,
-                                                    hw->aq.asq_last_status));
-                               }
-                       }
-               } else {
-                       aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
-                                                         hw,
-                                                         vsi->seid,
-                                                         cur_promisc, NULL,
-                                                         true);
-                       if (aq_ret) {
-                               retval =
-                               i40e_aq_rc_to_posix(aq_ret,
-                                                   hw->aq.asq_last_status);
-                               dev_info(&pf->pdev->dev,
-                                        "set unicast promisc failed on %s, err %s, aq_err %s\n",
-                                        vsi_name,
-                                        i40e_stat_str(hw, aq_ret),
-                                        i40e_aq_str(hw,
-                                                    hw->aq.asq_last_status));
-                       }
-                       aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
-                                                         hw,
-                                                         vsi->seid,
-                                                         cur_promisc, NULL);
-                       if (aq_ret) {
-                               retval =
-                               i40e_aq_rc_to_posix(aq_ret,
-                                                   hw->aq.asq_last_status);
-                               dev_info(&pf->pdev->dev,
-                                        "set multicast promisc failed on %s, err %s, aq_err %s\n",
-                                        vsi_name,
-                                        i40e_stat_str(hw, aq_ret),
-                                        i40e_aq_str(hw,
-                                                    hw->aq.asq_last_status));
-                       }
-               }
-               aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-                                                  vsi->seid,
-                                                  cur_promisc, NULL);
+               aq_ret = i40e_set_promiscuous(pf, cur_promisc);
                if (aq_ret) {
                        retval = i40e_aq_rc_to_posix(aq_ret,
-                                                    pf->hw.aq.asq_last_status);
+                                                    hw->aq.asq_last_status);
                        dev_info(&pf->pdev->dev,
-                                "set brdcast promisc failed, err %s, aq_err %s\n",
-                                        i40e_stat_str(hw, aq_ret),
-                                        i40e_aq_str(hw,
-                                                    hw->aq.asq_last_status));
+                                "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+                                cur_promisc ? "on" : "off",
+                                vsi_name,
+                                i40e_stat_str(hw, aq_ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
                }
        }
 out:
@@ -3964,7 +3966,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               read_barrier_depends();
+               smp_rmb();
 
                /* if the descriptor isn't done, no work yet to do */
                if (!(eop_desc->cmd_type_offset_bsz &
@@ -5629,14 +5631,6 @@ static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
                return -EINVAL;
 
        *reconfig_rss = false;
-
-       if (num_queues > I40E_MAX_QUEUES_PER_CH) {
-               dev_err(&pf->pdev->dev,
-                       "Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n",
-                       num_queues, I40E_MAX_QUEUES_PER_CH);
-               return -EINVAL;
-       }
-
        if (vsi->current_rss_size) {
                if (num_queues > vsi->current_rss_size) {
                        dev_dbg(&pf->pdev->dev,
@@ -7407,7 +7401,6 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
                dev_err(&pf->pdev->dev,
                        "Failed to add cloud filter, err %s\n",
                        i40e_stat_str(&pf->hw, err));
-               err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
                goto err;
        }
 
@@ -9429,6 +9422,15 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        if (!lock_acquired)
                rtnl_unlock();
 
+       /* Restore promiscuous settings */
+       ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+       if (ret)
+               dev_warn(&pf->pdev->dev,
+                        "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+                        pf->cur_promisc ? "on" : "off",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
        i40e_reset_all_vfs(pf, true);
 
        /* tell the firmware that we're starting */
index 0ccab0a5d717564d5773fb303d7c7f04ddcc0809..7689c2ee0d463650a4c4757c1210751767cad1aa 100644 (file)
@@ -328,15 +328,17 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                               u16 *data)
 {
-       i40e_status ret_code;
+       i40e_status ret_code = 0;
 
-       ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+       if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+               ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
        if (ret_code)
                return ret_code;
 
        ret_code = __i40e_read_nvm_word(hw, offset, data);
 
-       i40e_release_nvm(hw);
+       if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+               i40e_release_nvm(hw);
 
        return ret_code;
 }
index d6d352a6e6ead2ad7f228d7906a24337ad023f76..4566d66ffc7c95a72c4135075a19d5ac8bcd2689 100644 (file)
@@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               read_barrier_depends();
+               smp_rmb();
 
                i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
                /* we have caught up to head, no work left to do */
index 00d4833e992515b026c87c6dcf9dad91b1f84a46..0e8568719b4e65f924ead244da35b7e4a6a07b1e 100644 (file)
@@ -629,6 +629,7 @@ struct i40e_hw {
 #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
 #define I40E_HW_FLAG_802_1AD_CAPABLE        BIT_ULL(1)
 #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE  BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
        u64 flags;
 
        /* Used in set switch config AQ command */
index f8a794b72462873d2942dcaddac36ddad9db66e8..36cb8e068e856b321ae426b4eb0e1953330790aa 100644 (file)
@@ -2086,7 +2086,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
        }
 
        return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
-                                     (u8 *)vfres, sizeof(vfres));
+                                     (u8 *)vfres, sizeof(*vfres));
 }
 
 /**
@@ -2218,18 +2218,19 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                struct i40e_mac_filter *f;
 
                f = i40e_find_mac(vsi, al->list[i].addr);
-               if (!f)
+               if (!f) {
                        f = i40e_add_mac_filter(vsi, al->list[i].addr);
 
-               if (!f) {
-                       dev_err(&pf->pdev->dev,
-                               "Unable to add MAC filter %pM for VF %d\n",
-                                al->list[i].addr, vf->vf_id);
-                       ret = I40E_ERR_PARAM;
-                       spin_unlock_bh(&vsi->mac_filter_hash_lock);
-                       goto error_param;
-               } else {
-                       vf->num_mac++;
+                       if (!f) {
+                               dev_err(&pf->pdev->dev,
+                                       "Unable to add MAC filter %pM for VF %d\n",
+                                       al->list[i].addr, vf->vf_id);
+                               ret = I40E_ERR_PARAM;
+                               spin_unlock_bh(&vsi->mac_filter_hash_lock);
+                               goto error_param;
+                       } else {
+                               vf->num_mac++;
+                       }
                }
        }
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
index fe817e2b6fef4468e2ed33a50de7190b08f3c282..50864f99446d3a9f22c83c984eaec4f25caa6015 100644 (file)
@@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               read_barrier_depends();
+               smp_rmb();
 
                i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
                /* if the descriptor isn't done, no work yet to do */
index d8131139565ea0ae48c628d1b7219d35bacd3a7f..da60ce12b33d31660c6c00c0425b6ce5919dadd2 100644 (file)
@@ -25,6 +25,26 @@ static struct i40e_ops i40evf_lan_ops = {
        .setup_qvlist = i40evf_client_setup_qvlist,
 };
 
+/**
+ * i40evf_client_get_params - retrieve relevant client parameters
+ * @vsi: VSI with parameters
+ * @params: client param struct
+ **/
+static
+void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+       int i;
+
+       memset(params, 0, sizeof(struct i40e_params));
+       params->mtu = vsi->netdev->mtu;
+       params->link_up = vsi->back->link_up;
+
+       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+               params->qos.prio_qos[i].tc = 0;
+               params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
+       }
+}
+
 /**
  * i40evf_notify_client_message - call the client message receive callback
  * @vsi: the VSI associated with this client
@@ -66,10 +86,6 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
                return;
 
        cinst = vsi->back->cinst;
-       memset(&params, 0, sizeof(params));
-       params.mtu = vsi->netdev->mtu;
-       params.link_up = vsi->back->link_up;
-       params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
 
        if (!cinst || !cinst->client || !cinst->client->ops ||
            !cinst->client->ops->l2_param_change) {
@@ -77,6 +93,8 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
                        "Cannot locate client instance l2_param_change function\n");
                return;
        }
+       i40evf_client_get_params(vsi, &params);
+       cinst->lan_info.params = params;
        cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
                                            &params);
 }
@@ -166,9 +184,9 @@ static struct i40e_client_instance *
 i40evf_client_add_instance(struct i40evf_adapter *adapter)
 {
        struct i40e_client_instance *cinst = NULL;
-       struct netdev_hw_addr *mac = NULL;
        struct i40e_vsi *vsi = &adapter->vsi;
-       int i;
+       struct netdev_hw_addr *mac = NULL;
+       struct i40e_params params;
 
        if (!vf_registered_client)
                goto out;
@@ -192,18 +210,14 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
        cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
        cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
        cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+       i40evf_client_get_params(vsi, &params);
+       cinst->lan_info.params = params;
        set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
 
        cinst->lan_info.msix_count = adapter->num_iwarp_msix;
        cinst->lan_info.msix_entries =
                        &adapter->msix_entries[adapter->iwarp_base_vector];
 
-       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-               cinst->lan_info.params.qos.prio_qos[i].tc = 0;
-               cinst->lan_info.params.qos.prio_qos[i].qs_handle =
-                                                               vsi->qs_handle;
-       }
-
        mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
                               struct netdev_hw_addr, list);
        if (mac)
index ca2ebdbd24d7c9ce832af711942671ac622d4a15..7b2a4eba92e23b7f5757d813219edcca1f69426a 100644 (file)
@@ -2110,6 +2110,11 @@ static void i40evf_client_task(struct work_struct *work)
                adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
                goto out;
        }
+       if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+               i40evf_notify_client_l2_params(&adapter->vsi);
+               adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+               goto out;
+       }
        if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
                i40evf_notify_client_close(&adapter->vsi, false);
                adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
@@ -2118,11 +2123,6 @@ static void i40evf_client_task(struct work_struct *work)
        if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
                i40evf_notify_client_open(&adapter->vsi);
                adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
-               goto out;
-       }
-       if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
-               i40evf_notify_client_l2_params(&adapter->vsi);
-               adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
        }
 out:
        clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
index e94d3c256667637c8186fd83299b64ea2de53c72..c208753ff5b7cec52259e15b5c545b5af4b51c6f 100644 (file)
@@ -7317,7 +7317,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               read_barrier_depends();
+               smp_rmb();
 
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
index 713e8df23744a46ea30824a014f875bd28d29fc5..4214c1519a879c9cb70cd1791b361657a5615d1e 100644 (file)
@@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               read_barrier_depends();
+               smp_rmb();
 
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
index ca06c3cc2ca841fc395c957efe64cf717b36670f..62a18914f00f4fe1f608b208da86ddf616c752e1 100644 (file)
@@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               read_barrier_depends();
+               smp_rmb();
 
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
index feed11bc9ddffdf7b779abab19a7e3e678909ecd..1f4a69134adeb58a14d13998e3be39feba5134f0 100644 (file)
@@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                        break;
 
                /* prevent any other reads prior to eop_desc */
-               read_barrier_depends();
+               smp_rmb();
 
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
index 81c1fac00d330e1fc2e16cb9f7756f954339af62..62f204f3231693807231c269183a49961020c7c8 100644 (file)
@@ -1346,9 +1346,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
        spin_unlock_bh(&mp->mib_counters_lock);
 }
 
-static void mib_counters_timer_wrapper(unsigned long _mp)
+static void mib_counters_timer_wrapper(struct timer_list *t)
 {
-       struct mv643xx_eth_private *mp = (void *)_mp;
+       struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
        mib_counters_update(mp);
        mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
@@ -2321,9 +2321,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-static inline void oom_timer_wrapper(unsigned long data)
+static inline void oom_timer_wrapper(struct timer_list *t)
 {
-       struct mv643xx_eth_private *mp = (void *)data;
+       struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
 
        napi_schedule(&mp->napi);
 }
@@ -3178,8 +3178,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        mib_counters_clear(mp);
 
-       setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
-                   (unsigned long)mp);
+       timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;
 
        spin_lock_init(&mp->mib_counters_lock);
@@ -3188,7 +3187,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
 
-       setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
+       timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
 
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
index 6c20e811f9732ee7b1e432827070e36ede6eb688..d83a78be98a2cb90f5cea6b07eb257e11ad9ebed 100644 (file)
@@ -4629,11 +4629,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
                       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
                val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
                writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-               val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-               val |= MVPP2_GMAC_DISABLE_PADDING;
-               val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
-               writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
        } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
                val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
                val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
@@ -4641,10 +4636,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
                       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
                val &= ~MVPP22_CTRL4_DP_CLK_SEL;
                writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-               val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-               val &= ~MVPP2_GMAC_DISABLE_PADDING;
-               writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
        }
 
        /* The port is connected to a copper PHY */
@@ -5805,7 +5796,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                                                sizeof(*txq_pcpu->buffs),
                                                GFP_KERNEL);
                if (!txq_pcpu->buffs)
-                       goto cleanup;
+                       return -ENOMEM;
 
                txq_pcpu->count = 0;
                txq_pcpu->reserved_num = 0;
@@ -5821,26 +5812,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                                           &txq_pcpu->tso_headers_dma,
                                           GFP_KERNEL);
                if (!txq_pcpu->tso_headers)
-                       goto cleanup;
+                       return -ENOMEM;
        }
 
        return 0;
-cleanup:
-       for_each_present_cpu(cpu) {
-               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-               kfree(txq_pcpu->buffs);
-
-               dma_free_coherent(port->dev->dev.parent,
-                                 txq_pcpu->size * TSO_HEADER_SIZE,
-                                 txq_pcpu->tso_headers,
-                                 txq_pcpu->tso_headers_dma);
-       }
-
-       dma_free_coherent(port->dev->dev.parent,
-                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                         txq->descs, txq->descs_dma);
-
-       return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -6867,6 +6842,12 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
        else if (!IS_ALIGNED(ring->tx_pending, 32))
                new_tx_pending = ALIGN(ring->tx_pending, 32);
 
+       /* The Tx ring size cannot be smaller than the minimum number of
+        * descriptors needed for TSO.
+        */
+       if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+               new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
        if (ring->rx_pending != new_rx_pending) {
                netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
                            ring->rx_pending, new_rx_pending);
@@ -8345,7 +8326,7 @@ static int mvpp2_probe(struct platform_device *pdev)
        for_each_available_child_of_node(dn, port_node) {
                err = mvpp2_port_probe(pdev, port_node, priv, i);
                if (err < 0)
-                       goto err_mg_clk;
+                       goto err_port_probe;
                i++;
        }
 
@@ -8361,12 +8342,19 @@ static int mvpp2_probe(struct platform_device *pdev)
        priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
        if (!priv->stats_queue) {
                err = -ENOMEM;
-               goto err_mg_clk;
+               goto err_port_probe;
        }
 
        platform_set_drvdata(pdev, priv);
        return 0;
 
+err_port_probe:
+       i = 0;
+       for_each_available_child_of_node(dn, port_node) {
+               if (priv->port_list[i])
+                       mvpp2_port_remove(priv->port_list[i]);
+               i++;
+       }
 err_mg_clk:
        clk_disable_unprepare(priv->axi_clk);
        if (priv->hw_version == MVPP22)
index 91b1c154fd29e68157fea64b30c25186d903a047..7bbd86f08e5ff369f43af9b22818add8745cfe80 100644 (file)
@@ -362,9 +362,9 @@ static void rxq_refill(struct net_device *dev)
        }
 }
 
-static inline void rxq_refill_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(struct timer_list *t)
 {
-       struct pxa168_eth_private *pep = (void *)data;
+       struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
        napi_schedule(&pep->napi);
 }
 
@@ -1496,8 +1496,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
        netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
 
        memset(&pep->timeout, 0, sizeof(struct timer_list));
-       setup_timer(&pep->timeout, rxq_refill_timer_wrapper,
-                   (unsigned long)pep);
+       timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
 
        pep->smi_bus = mdiobus_alloc();
        if (!pep->smi_bus) {
index eef35bf3e8490f3832e62270d129d12bf098fae3..6e423f098a60d33f96678ef5c08b1b52cbe3287f 100644 (file)
@@ -1495,9 +1495,9 @@ static int xm_check_link(struct net_device *dev)
  * get an interrupt when carrier is detected, need to poll for
  * link coming up.
  */
-static void xm_link_timer(unsigned long arg)
+static void xm_link_timer(struct timer_list *t)
 {
-       struct skge_port *skge = (struct skge_port *) arg;
+       struct skge_port *skge = from_timer(skge, t, link_timer);
        struct net_device *dev = skge->netdev;
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
@@ -3897,7 +3897,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
        /* Only used for Genesis XMAC */
        if (is_genesis(hw))
-           setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+           timer_setup(&skge->link_timer, xm_link_timer, 0);
        else {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                                   NETIF_F_RXCSUM;
index 1145cde2274a4cb778ba816716c3c55a5850d71e..9efe1771423cdde9b7832042b61f3ac60b347123 100644 (file)
@@ -2974,9 +2974,9 @@ static int sky2_rx_hung(struct net_device *dev)
        }
 }
 
-static void sky2_watchdog(unsigned long arg)
+static void sky2_watchdog(struct timer_list *t)
 {
-       struct sky2_hw *hw = (struct sky2_hw *) arg;
+       struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
 
        /* Check for lost IRQ once a second */
        if (sky2_read32(hw, B0_ISRC)) {
@@ -5083,7 +5083,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                sky2_show_addr(dev1);
        }
 
-       setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
+       timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
        INIT_WORK(&hw->restart_work, sky2_restart);
 
        pci_set_drvdata(pdev, hw);
index 2d46ec84ebdffbea5032a1f670befab4f585b22c..2d0897b7d86035286666e38ad4e41ab63fb4746b 100644 (file)
@@ -3142,13 +3142,17 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
        if (!mlxsw_sp->ports)
                return -ENOMEM;
 
-       mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
+       mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
+                                                GFP_KERNEL);
        if (!mlxsw_sp->port_to_module) {
                err = -ENOMEM;
                goto err_port_to_module_alloc;
        }
 
        for (i = 1; i < max_ports; i++) {
+               /* Mark as invalid */
+               mlxsw_sp->port_to_module[i] = -1;
+
                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
                                                    &width, &lane);
                if (err)
@@ -3216,6 +3220,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
 
        for (i = 0; i < count; i++) {
                local_port = base_port + i * 2;
+               if (mlxsw_sp->port_to_module[local_port] < 0)
+                       continue;
                module = mlxsw_sp->port_to_module[local_port];
 
                mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
index 58cf222fb98576426a9dd654a31d7f033a11daa4..432ab9b12b7f59ded586a663f0a686741740463d 100644 (file)
@@ -152,7 +152,7 @@ struct mlxsw_sp {
        const struct mlxsw_bus_info *bus_info;
        unsigned char base_mac[ETH_ALEN];
        struct mlxsw_sp_upper *lags;
-       u8 *port_to_module;
+       int *port_to_module;
        struct mlxsw_sp_sb *sb;
        struct mlxsw_sp_bridge *bridge;
        struct mlxsw_sp_router *router;
index 632c7b229054cd2994c96590e36c8820e4ee47f3..72ef4f8025f00ff8810c2955b25b7f3baec49be1 100644 (file)
@@ -1370,8 +1370,9 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
                mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
 }
 
-static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
-                                       struct mlxsw_sp_rif *rif);
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_rif *old_rif,
+                                        struct mlxsw_sp_rif *new_rif);
 static int
 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1389,17 +1390,18 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
                return PTR_ERR(new_lb_rif);
        ipip_entry->ol_lb = new_lb_rif;
 
-       if (keep_encap) {
-               list_splice_init(&old_lb_rif->common.nexthop_list,
-                                &new_lb_rif->common.nexthop_list);
-               mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
-       }
+       if (keep_encap)
+               mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
+                                            &new_lb_rif->common);
 
        mlxsw_sp_rif_destroy(&old_lb_rif->common);
 
        return 0;
 }
 
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_rif *rif);
+
 /**
  * Update the offload related to an IPIP entry. This always updates decap, and
  * in addition to that it also:
@@ -1449,9 +1451,27 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_ipip_entry *ipip_entry =
                mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+       enum mlxsw_sp_l3proto ul_proto;
+       union mlxsw_sp_l3addr saddr;
+       u32 ul_tb_id;
 
        if (!ipip_entry)
                return 0;
+
+       /* For flat configuration cases, moving overlay to a different VRF might
+        * cause local address conflict, and the conflicting tunnels need to be
+        * demoted.
+        */
+       ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+       ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+       saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+       if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+                                                saddr, ul_tb_id,
+                                                ipip_entry)) {
+               mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+               return 0;
+       }
+
        return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
                                                   true, false, false, extack);
 }
@@ -3343,22 +3363,19 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
        return ul_dev ? (ul_dev->flags & IFF_UP) : true;
 }
 
-static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
-                                     struct mlxsw_sp_nexthop *nh,
-                                     struct net_device *ol_dev)
+static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
+                                      struct mlxsw_sp_nexthop *nh,
+                                      struct mlxsw_sp_ipip_entry *ipip_entry)
 {
        bool removing;
 
        if (!nh->nh_grp->gateway || nh->ipip_entry)
-               return 0;
-
-       nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
-       if (!nh->ipip_entry)
-               return -ENOENT;
+               return;
 
-       removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+       nh->ipip_entry = ipip_entry;
+       removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
        __mlxsw_sp_nexthop_neigh_update(nh, removing);
-       return 0;
+       mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
 }
 
 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
@@ -3403,21 +3420,21 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_nexthop *nh,
                                       struct fib_nh *fib_nh)
 {
-       struct mlxsw_sp_router *router = mlxsw_sp->router;
+       const struct mlxsw_sp_ipip_ops *ipip_ops;
        struct net_device *dev = fib_nh->nh_dev;
-       enum mlxsw_sp_ipip_type ipipt;
+       struct mlxsw_sp_ipip_entry *ipip_entry;
        struct mlxsw_sp_rif *rif;
        int err;
 
-       if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
-           router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
-                                                    MLXSW_SP_L3_PROTO_IPV4)) {
-               nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-               err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
-               if (err)
-                       return err;
-               mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
-               return 0;
+       ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+       if (ipip_entry) {
+               ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+               if (ipip_ops->can_offload(mlxsw_sp, dev,
+                                         MLXSW_SP_L3_PROTO_IPV4)) {
+                       nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+                       mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+                       return 0;
+               }
        }
 
        nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3545,6 +3562,18 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
        }
 }
 
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_rif *old_rif,
+                                        struct mlxsw_sp_rif *new_rif)
+{
+       struct mlxsw_sp_nexthop *nh;
+
+       list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
+       list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
+               nh->rif = new_rif;
+       mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+}
+
 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
                                           struct mlxsw_sp_rif *rif)
 {
@@ -3996,7 +4025,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
        case RTN_LOCAL:
                ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
                                                 MLXSW_SP_L3_PROTO_IPV4, dip);
-               if (ipip_entry) {
+               if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
                        fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
                        return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
                                                             fib_entry,
@@ -4694,21 +4723,21 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_nexthop *nh,
                                       const struct rt6_info *rt)
 {
-       struct mlxsw_sp_router *router = mlxsw_sp->router;
+       const struct mlxsw_sp_ipip_ops *ipip_ops;
+       struct mlxsw_sp_ipip_entry *ipip_entry;
        struct net_device *dev = rt->dst.dev;
-       enum mlxsw_sp_ipip_type ipipt;
        struct mlxsw_sp_rif *rif;
        int err;
 
-       if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
-           router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
-                                                    MLXSW_SP_L3_PROTO_IPV6)) {
-               nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-               err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
-               if (err)
-                       return err;
-               mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
-               return 0;
+       ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+       if (ipip_entry) {
+               ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+               if (ipip_ops->can_offload(mlxsw_sp, dev,
+                                         MLXSW_SP_L3_PROTO_IPV6)) {
+                       nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+                       mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+                       return 0;
+               }
        }
 
        nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
index b171ed2015fe479b6d7d099f14e188a3dd8cda00..2521c8c40015de32abbb3f30dbf8f9d527126b6f 100644 (file)
@@ -3501,7 +3501,7 @@ static void myri10ge_watchdog(struct work_struct *work)
  * cannot detect a NIC with a parity error in a timely fashion if the
  * NIC is lightly loaded.
  */
-static void myri10ge_watchdog_timer(unsigned long arg)
+static void myri10ge_watchdog_timer(struct timer_list *t)
 {
        struct myri10ge_priv *mgp;
        struct myri10ge_slice_state *ss;
@@ -3509,7 +3509,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
        u32 rx_pause_cnt;
        u16 cmd;
 
-       mgp = (struct myri10ge_priv *)arg;
+       mgp = from_timer(mgp, t, watchdog_timer);
 
        rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
        busy_slice_cnt = 0;
@@ -3930,8 +3930,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_save_state(pdev);
 
        /* Setup the watchdog timer */
-       setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
-                   (unsigned long)mgp);
+       timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);
 
        netdev->ethtool_ops = &myri10ge_ethtool_ops;
        INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
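The myri10ge hunk above is part of the tree-wide timer API conversion: the callback now receives the struct timer_list pointer and recovers its container with from_timer() instead of casting an unsigned long argument, and timer_setup() replaces setup_timer(). A minimal sketch of the pattern, using a hypothetical foo_priv structure rather than any driver's real state:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_priv {
	struct timer_list watchdog;	/* must be embedded in the private struct */
	unsigned long missed_beats;
};

/* New-style callback: it is handed the timer itself, not an opaque value. */
static void foo_watchdog(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the named member. */
	struct foo_priv *priv = from_timer(priv, t, watchdog);

	priv->missed_beats++;
	mod_timer(&priv->watchdog, jiffies + HZ);
}

static void foo_watchdog_start(struct foo_priv *priv)
{
	/* Replaces setup_timer(&priv->watchdog, cb, (unsigned long)priv). */
	timer_setup(&priv->watchdog, foo_watchdog, 0);
	mod_timer(&priv->watchdog, jiffies + HZ);
}

The same conversion recurs below in pch_gbe, pasemi, ql3xxx, rocker ofdpa, stmmac, xlgmac, cpsw_ale, netcp and spider_net, and the tlan/scc hunks drop the matching TIMER_FUNC_TYPE casts.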
index fe7e0e1dd01def224bd1932483170e04d488ff72..b2299f2b215502a7d5cbceb37a4d2292040e0d74 100644 (file)
@@ -1530,7 +1530,7 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
                        vxge_debug_init(VXGE_ERR,
                                "vxge_hw_vpath_reset failed for"
                                "vpath:%d", vp_id);
-                               return status;
+                       return status;
                }
        } else
                return VXGE_HW_FAIL;
@@ -1950,19 +1950,19 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
         * for all VPATHs. The h/w only uses the lowest numbered VPATH
         * when steering frames.
         */
-        for (index = 0; index < vdev->no_of_vpath; index++) {
+       for (index = 0; index < vdev->no_of_vpath; index++) {
                status = vxge_hw_vpath_rts_rth_set(
                                vdev->vpaths[index].handle,
                                vdev->config.rth_algorithm,
                                &hash_types,
                                vdev->config.rth_bkt_sz);
-                if (status != VXGE_HW_OK) {
+               if (status != VXGE_HW_OK) {
                        vxge_debug_init(VXGE_ERR,
                                "RTH configuration failed for vpath:%d",
                                vdev->vpaths[index].device_id);
                        return status;
-                }
-        }
+               }
+       }
 
        return status;
 }
@@ -1991,7 +1991,7 @@ static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
                                vxge_debug_init(VXGE_ERR,
                                        "vxge_hw_vpath_reset failed for "
                                        "vpath:%d", i);
-                                       return status;
+                               return status;
                        }
                }
        }
@@ -2474,32 +2474,31 @@ static int vxge_add_isr(struct vxgedev *vdev)
                        switch (msix_idx) {
                        case 0:
                                snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-                               "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+                                       "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
                                        vdev->ndev->name,
                                        vdev->entries[intr_cnt].entry,
                                        pci_fun, vp_idx);
                                ret = request_irq(
-                                   vdev->entries[intr_cnt].vector,
+                                       vdev->entries[intr_cnt].vector,
                                        vxge_tx_msix_handle, 0,
                                        vdev->desc[intr_cnt],
                                        &vdev->vpaths[vp_idx].fifo);
-                                       vdev->vxge_entries[intr_cnt].arg =
+                               vdev->vxge_entries[intr_cnt].arg =
                                                &vdev->vpaths[vp_idx].fifo;
                                irq_req = 1;
                                break;
                        case 1:
                                snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
-                               "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+                                       "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
                                        vdev->ndev->name,
                                        vdev->entries[intr_cnt].entry,
                                        pci_fun, vp_idx);
                                ret = request_irq(
-                                   vdev->entries[intr_cnt].vector,
-                                       vxge_rx_msix_napi_handle,
-                                       0,
+                                       vdev->entries[intr_cnt].vector,
+                                       vxge_rx_msix_napi_handle, 0,
                                        vdev->desc[intr_cnt],
                                        &vdev->vpaths[vp_idx].ring);
-                                       vdev->vxge_entries[intr_cnt].arg =
+                               vdev->vxge_entries[intr_cnt].arg =
                                                &vdev->vpaths[vp_idx].ring;
                                irq_req = 1;
                                break;
@@ -2512,9 +2511,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
                                vxge_rem_msix_isr(vdev);
                                vdev->config.intr_type = INTA;
                                vxge_debug_init(VXGE_ERR,
-                                       "%s: Defaulting to INTA"
-                                       vdev->ndev->name);
-                                       goto INTA_MODE;
+                                       "%s: Defaulting to INTA",
+                                       vdev->ndev->name);
+                               goto INTA_MODE;
                        }
 
                        if (irq_req) {
@@ -4505,8 +4504,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        if (status != VXGE_HW_OK) {
                vxge_debug_init(VXGE_ERR,
                        "Failed to initialize device (%d)", status);
-                       ret = -EINVAL;
-                       goto _exit3;
+               ret = -EINVAL;
+               goto _exit3;
        }
 
        if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
index b6cee71f49d3d4a0b68afeda4d879f2cd12de5f6..bc879aeb62d4ee95a42654a78ae9329145d22b3f 100644 (file)
@@ -214,8 +214,14 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 {
        int err;
 
-       if (prog && !prog->aux->offload)
-               return -EINVAL;
+       if (prog) {
+               struct bpf_dev_offload *offload = prog->aux->offload;
+
+               if (!offload)
+                       return -EINVAL;
+               if (offload->netdev != nn->dp.netdev)
+                       return -EINVAL;
+       }
 
        if (prog && old_prog) {
                u8 cap;
index e0283bb24f06876d59667484d5e909951bd0947d..8fcc90c0d2d3ad1a3f48348ca2d9c02e190133cb 100644 (file)
@@ -125,6 +125,21 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
        return nfp_flower_cmsg_portmod(repr, false);
 }
 
+static int
+nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
+{
+       return tc_setup_cb_egdev_register(netdev,
+                                         nfp_flower_setup_tc_egress_cb,
+                                         netdev_priv(netdev));
+}
+
+static void
+nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
+{
+       tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
+                                    netdev_priv(netdev));
+}
+
 static void nfp_flower_sriov_disable(struct nfp_app *app)
 {
        struct nfp_flower_priv *priv = app->priv;
@@ -452,6 +467,9 @@ const struct nfp_app_type app_flower = {
        .vnic_init      = nfp_flower_vnic_init,
        .vnic_clean     = nfp_flower_vnic_clean,
 
+       .repr_init      = nfp_flower_repr_netdev_init,
+       .repr_clean     = nfp_flower_repr_netdev_clean,
+
        .repr_open      = nfp_flower_repr_netdev_open,
        .repr_stop      = nfp_flower_repr_netdev_stop,
 
index c90e72b7ff5ac1d47241fa32bf38d91fcc360ddf..e6b26c5ae6e0f11b8bdbe5593603e117bd1d5a4a 100644 (file)
@@ -52,8 +52,7 @@ struct nfp_app;
 #define NFP_FLOWER_MASK_ELEMENT_RS     1
 #define NFP_FLOWER_MASK_HASH_BITS      10
 
-#define NFP_FL_META_FLAG_NEW_MASK      128
-#define NFP_FL_META_FLAG_LAST_MASK     1
+#define NFP_FL_META_FLAG_MANAGE_MASK   BIT(7)
 
 #define NFP_FL_MASK_REUSE_TIME_NS      40000
 #define NFP_FL_MASK_ID_LOCATION                1
@@ -197,5 +196,7 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
+                                 void *cb_priv);
 
 #endif
index 193520ef23f05af42098fd99ed0e18d9c687804f..db977cf8e9332b27a63bd660c4f96b178e54cd14 100644 (file)
@@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
                id = nfp_add_mask_table(app, mask_data, mask_len);
                if (id < 0)
                        return false;
-               *meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
+               *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
        }
        *mask_id = id;
 
@@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
        if (!mask_entry)
                return false;
 
+       if (meta_flags)
+               *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+
        *mask_id = mask_entry->mask_id;
        mask_entry->ref_cnt--;
        if (!mask_entry->ref_cnt) {
@@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
                nfp_release_mask_id(app, *mask_id);
                kfree(mask_entry);
                if (meta_flags)
-                       *meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
+                       *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
        }
 
        return true;
index cdbb5464b790372edede5c9005833d0766ec828c..553f94f55dce64ba9cdb661ff9bdb04faf41acac 100644 (file)
@@ -131,7 +131,8 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
 
 static int
 nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
-                               struct tc_cls_flower_offload *flow)
+                               struct tc_cls_flower_offload *flow,
+                               bool egress)
 {
        struct flow_dissector_key_basic *mask_basic = NULL;
        struct flow_dissector_key_basic *key_basic = NULL;
@@ -167,6 +168,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
                        skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  flow->key);
+               if (!egress)
+                       return -EOPNOTSUPP;
+
                if (mask_enc_ctl->addr_type != 0xffff ||
                    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
                        return -EOPNOTSUPP;
@@ -194,6 +198,9 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
 
                key_layer |= NFP_FLOWER_LAYER_VXLAN;
                key_size += sizeof(struct nfp_flower_vxlan);
+       } else if (egress) {
+               /* Reject non tunnel matches offloaded to egress repr. */
+               return -EOPNOTSUPP;
        }
 
        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -308,6 +315,7 @@ err_free_flow:
  * @app:       Pointer to the APP handle
  * @netdev:    netdev structure.
  * @flow:      TC flower classifier offload structure.
+ * @egress:    NFP netdev is the egress.
  *
  * Adds a new flow to the repeated hash structure and action payload.
  *
@@ -315,7 +323,7 @@ err_free_flow:
  */
 static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
-                      struct tc_cls_flower_offload *flow)
+                      struct tc_cls_flower_offload *flow, bool egress)
 {
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *flow_pay;
@@ -326,7 +334,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
        if (!key_layer)
                return -ENOMEM;
 
-       err = nfp_flower_calculate_key_layers(key_layer, flow);
+       err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
        if (err)
                goto err_free_key_ls;
 
@@ -447,7 +455,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
 
 static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
-                       struct tc_cls_flower_offload *flower)
+                       struct tc_cls_flower_offload *flower, bool egress)
 {
        if (!eth_proto_is_802_3(flower->common.protocol) ||
            flower->common.chain_index)
@@ -455,7 +463,7 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
 
        switch (flower->command) {
        case TC_CLSFLOWER_REPLACE:
-               return nfp_flower_add_offload(app, netdev, flower);
+               return nfp_flower_add_offload(app, netdev, flower, egress);
        case TC_CLSFLOWER_DESTROY:
                return nfp_flower_del_offload(app, netdev, flower);
        case TC_CLSFLOWER_STATS:
@@ -465,6 +473,23 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
        return -EOPNOTSUPP;
 }
 
+int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
+                                 void *cb_priv)
+{
+       struct nfp_repr *repr = cb_priv;
+
+       if (!tc_can_offload(repr->netdev))
+               return -EOPNOTSUPP;
+
+       switch (type) {
+       case TC_SETUP_CLSFLOWER:
+               return nfp_flower_repr_offload(repr->app, repr->netdev,
+                                              type_data, true);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
                                        void *type_data, void *cb_priv)
 {
@@ -476,7 +501,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(repr->app, repr->netdev,
-                                              type_data);
+                                              type_data, false);
        default:
                return -EOPNOTSUPP;
        }
index 54b67c9b8d5b6dc7a88785611a3dca6e84b1d563..0e5e0305ad1cea276516a9c50122c4a16575d9fe 100644 (file)
@@ -76,6 +76,8 @@ extern const struct nfp_app_type app_flower;
  * @vnic_free: free up app's vNIC state
  * @vnic_init: vNIC netdev was registered
  * @vnic_clean:        vNIC netdev about to be unregistered
+ * @repr_init: representor about to be registered
+ * @repr_clean:        representor about to be unregistered
  * @repr_open: representor netdev open callback
  * @repr_stop: representor netdev stop callback
  * @start:     start application logic
@@ -109,6 +111,9 @@ struct nfp_app_type {
        int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn);
        void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
 
+       int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+       void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
+
        int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
        int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
 
@@ -212,6 +217,21 @@ static inline int nfp_app_repr_stop(struct nfp_app *app, struct nfp_repr *repr)
        return app->type->repr_stop(app, repr);
 }
 
+static inline int
+nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
+{
+       if (!app->type->repr_init)
+               return 0;
+       return app->type->repr_init(app, netdev);
+}
+
+static inline void
+nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
+{
+       if (app->type->repr_clean)
+               app->type->repr_clean(app, netdev);
+}
+
 static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
 {
        app->ctrl = ctrl;
index 60c8d733a37daa47216ccda526ccc7d9ca3448b0..2801ecd09eab098e66ef28aa89fcd5c5f7e7430d 100644 (file)
@@ -104,7 +104,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
        { "rx_frame_too_long_errors",
                        NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
        { "rx_range_length_errors",     NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
-       { "rx_vlan_reveive_ok",         NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, },
+       { "rx_vlan_received_ok",        NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
        { "rx_errors",                  NFP_MAC_STATS_RX_IN_ERRORS, },
        { "rx_broadcast_pkts",          NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
        { "rx_drop_events",             NFP_MAC_STATS_RX_DROP_EVENTS, },
index 1bce8c131bb9c22499da31fc4aa81bae62d62cbd..924a05e05da027523e7845728c6ddc4d41accc0b 100644 (file)
@@ -258,6 +258,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
 static void nfp_repr_clean(struct nfp_repr *repr)
 {
        unregister_netdev(repr->netdev);
+       nfp_app_repr_clean(repr->app, repr->netdev);
        dst_release((struct dst_entry *)repr->dst);
        nfp_port_free(repr->port);
 }
@@ -297,6 +298,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
        netdev->netdev_ops = &nfp_repr_netdev_ops;
        netdev->ethtool_ops = &nfp_port_ethtool_ops;
 
+       netdev->max_mtu = pf_netdev->max_mtu;
+
        SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
 
        if (nfp_app_has_tc(app)) {
@@ -304,12 +307,18 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
                netdev->hw_features |= NETIF_F_HW_TC;
        }
 
-       err = register_netdev(netdev);
+       err = nfp_app_repr_init(app, netdev);
        if (err)
                goto err_clean;
 
+       err = register_netdev(netdev);
+       if (err)
+               goto err_repr_clean;
+
        return 0;
 
+err_repr_clean:
+       nfp_app_repr_clean(app, netdev);
 err_clean:
        dst_release((struct dst_entry *)repr->dst);
        return err;
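The reordered cleanup above follows the usual goto-unwind convention: each new initialization step gets a matching label, and a failure undoes only the steps that already succeeded, in reverse order. A hedged sketch with hypothetical foo_* helpers (only register_netdev() is a real kernel call here):

#include <linux/netdevice.h>

struct foo_app;					/* hypothetical app handle */
int foo_app_repr_init(struct foo_app *app, struct net_device *netdev);
void foo_app_repr_clean(struct foo_app *app, struct net_device *netdev);

static int foo_repr_setup(struct foo_app *app, struct net_device *netdev)
{
	int err;

	err = foo_app_repr_init(app, netdev);	/* step 1 (may be a no-op) */
	if (err)
		goto err_out;

	err = register_netdev(netdev);		/* step 2 */
	if (err)
		goto err_repr_clean;		/* undo step 1 only */

	return 0;

err_repr_clean:
	foo_app_repr_clean(app, netdev);
err_out:
	return err;
}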
index 51dcb9c603ee42c2bf1a3fa25996555c725507f5..21bd4aa3264681ea0af49aad1b98a71e0c5c4958 100644 (file)
@@ -157,7 +157,7 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
                                                        /* unused 0x008 */
 #define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS         (NFP_MAC_STATS_BASE + 0x010)
 #define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS           (NFP_MAC_STATS_BASE + 0x018)
-#define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK               (NFP_MAC_STATS_BASE + 0x020)
+#define NFP_MAC_STATS_RX_VLAN_RECEIVED_OK              (NFP_MAC_STATS_BASE + 0x020)
 #define NFP_MAC_STATS_RX_IN_ERRORS                     (NFP_MAC_STATS_BASE + 0x028)
 #define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS             (NFP_MAC_STATS_BASE + 0x030)
 #define NFP_MAC_STATS_RX_DROP_EVENTS                   (NFP_MAC_STATS_BASE + 0x038)
index ac8439ceea10affdf7ead137386145cc7cc514d4..481876b5424c9567b6129951aa47f34ed0bee154 100644 (file)
@@ -1986,9 +1986,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
                                         tx_skb->dma_len,
                                         DMA_TO_DEVICE);
                else
-                       pci_unmap_page(np->pci_dev, tx_skb->dma,
+                       dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
                                       tx_skb->dma_len,
-                                      PCI_DMA_TODEVICE);
+                                      DMA_TO_DEVICE);
                tx_skb->dma = 0;
        }
 }
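The forcedeth hunk swaps the legacy pci_unmap_page()/PCI_DMA_TODEVICE wrappers for the generic DMA API, which takes the struct device and the enum dma_data_direction directly. A minimal sketch of an unmap helper under that API, with a hypothetical function name:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical TX unmap helper: &pdev->dev replaces the pci_dev argument,
 * and DMA_TO_DEVICE replaces PCI_DMA_TODEVICE.
 */
static void foo_unmap_tx(struct pci_dev *pdev, dma_addr_t dma,
			 unsigned int len, bool mapped_as_page)
{
	if (mapped_as_page)
		dma_unmap_page(&pdev->dev, dma, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
}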
index 457ee80307ea6612a8a085c981d5897f1dbd9901..40e52ffb732f4fc9f9a10b9444e8c4b2d2722e87 100644 (file)
@@ -1089,9 +1089,10 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
  * pch_gbe_watchdog - Watchdog process
  * @data:  Board private structure
  */
-static void pch_gbe_watchdog(unsigned long data)
+static void pch_gbe_watchdog(struct timer_list *t)
 {
-       struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+       struct pch_gbe_adapter *adapter = from_timer(adapter, t,
+                                                    watchdog_timer);
        struct net_device *netdev = adapter->netdev;
        struct pch_gbe_hw *hw = &adapter->hw;
 
@@ -2644,8 +2645,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev, "Invalid MAC address, "
                                    "interface disabled.\n");
        }
-       setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
-                   (unsigned long)adapter);
+       timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
 
        INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
 
index 49591d9c2e1b9f4bde7217e750396d99f032f302..c9a55b774935cd8900435e308c441d2283e5cb39 100644 (file)
@@ -943,9 +943,9 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
 
 #define TX_CLEAN_INTERVAL HZ
 
-static void pasemi_mac_tx_timer(unsigned long data)
+static void pasemi_mac_tx_timer(struct timer_list *t)
 {
-       struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
+       struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
        struct pasemi_mac *mac = txring->mac;
 
        pasemi_mac_clean_tx(txring);
@@ -1199,8 +1199,7 @@ static int pasemi_mac_open(struct net_device *dev)
        if (dev->phydev)
                phy_start(dev->phydev);
 
-       setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
-                   (unsigned long)mac->tx);
+       timer_setup(&mac->tx->clean_timer, pasemi_mac_tx_timer, 0);
        mod_timer(&mac->tx->clean_timer, jiffies + HZ);
 
        return 0;
index 6e15d3c10ebf798ce1df46199e9cecc3f4727330..fe7c1f230028343da3e06343b059bdd3e6f5f59a 100644 (file)
@@ -1277,11 +1277,10 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
        struct qed_dcbx_get *dcbx_info;
 
-       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_ATOMIC);
        if (!dcbx_info)
                return NULL;
 
-       memset(dcbx_info, 0, sizeof(*dcbx_info));
        if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
                kfree(dcbx_info);
                return NULL;
index 05479d4354696d0a34a573e4cbbe72a41bbfd7c1..9e5264d8773b09a3cdda77a8cec066a44d3359a3 100644 (file)
@@ -3749,9 +3749,9 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
        qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
 }
 
-static void ql3xxx_timer(unsigned long ptr)
+static void ql3xxx_timer(struct timer_list *t)
 {
-       struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+       struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
        queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
 }
 
@@ -3891,7 +3891,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
        INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
        INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
 
-       setup_timer(&qdev->adapter_timer, ql3xxx_timer, (unsigned long)qdev);
+       timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
        qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
 
        if (!cards_found) {
index dcb8c39382e70560a996832785bfbbd1db76d7e9..fc0d5fa65ad4c1ca02073e2d1d1554df68443e86 100644 (file)
@@ -2030,21 +2030,6 @@ out:
        return ret;
 }
 
-static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-       int ret;
-
-       del_timer_sync(&tp->timer);
-
-       rtl_lock_work(tp);
-       ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
-                               cmd->duplex, cmd->advertising);
-       rtl_unlock_work(tp);
-
-       return ret;
-}
-
 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
        netdev_features_t features)
 {
@@ -2171,6 +2156,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev,
        return rc;
 }
 
+static int rtl8169_set_link_ksettings(struct net_device *dev,
+                                     const struct ethtool_link_ksettings *cmd)
+{
+       struct rtl8169_private *tp = netdev_priv(dev);
+       int rc;
+       u32 advertising;
+
+       if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
+           cmd->link_modes.advertising))
+               return -EINVAL;
+
+       del_timer_sync(&tp->timer);
+
+       rtl_lock_work(tp);
+       rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
+                              cmd->base.duplex, advertising);
+       rtl_unlock_work(tp);
+
+       return rc;
+}
+
 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                             void *p)
 {
@@ -2591,7 +2597,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_coalesce           = rtl_get_coalesce,
        .set_coalesce           = rtl_set_coalesce,
-       .set_settings           = rtl8169_set_settings,
        .get_msglevel           = rtl8169_get_msglevel,
        .set_msglevel           = rtl8169_set_msglevel,
        .get_regs               = rtl8169_get_regs,
@@ -2603,6 +2608,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_ts_info            = ethtool_op_get_ts_info,
        .nway_reset             = rtl8169_nway_reset,
        .get_link_ksettings     = rtl8169_get_link_ksettings,
+       .set_link_ksettings     = rtl8169_set_link_ksettings,
 };
 
 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -3789,27 +3795,32 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy(tp, 0x1f, 0x0000);
 
        /* EEE setting */
-       rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
+       rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
        rtl_writephy(tp, 0x1f, 0x0005);
        rtl_writephy(tp, 0x05, 0x8b85);
-       rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
+       rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000);
        rtl_writephy(tp, 0x1f, 0x0004);
        rtl_writephy(tp, 0x1f, 0x0007);
        rtl_writephy(tp, 0x1e, 0x0020);
-       rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
+       rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000);
        rtl_writephy(tp, 0x1f, 0x0002);
        rtl_writephy(tp, 0x1f, 0x0000);
        rtl_writephy(tp, 0x0d, 0x0007);
        rtl_writephy(tp, 0x0e, 0x003c);
        rtl_writephy(tp, 0x0d, 0x4007);
-       rtl_writephy(tp, 0x0e, 0x0000);
+       rtl_writephy(tp, 0x0e, 0x0006);
        rtl_writephy(tp, 0x0d, 0x0000);
 
        /* Green feature */
        rtl_writephy(tp, 0x1f, 0x0003);
-       rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
-       rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
+       rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
+       rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x1f, 0x0005);
+       rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
        rtl_writephy(tp, 0x1f, 0x0000);
+       /* soft-reset phy */
+       rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
 
        /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
        rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
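Earlier in this file the removed rtl8169_set_settings() is replaced by a set_link_ksettings() implementation; the new hook converts the requested advertising link-mode bitmap back into the legacy u32 mask the driver still uses internally. A hedged sketch of that conversion, with foo_set_speed() standing in for the driver's own helper:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

int foo_set_speed(struct net_device *dev, u8 autoneg, u32 speed,
		  u8 duplex, u32 advertising);	/* hypothetical helper */

static int foo_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	u32 advertising;

	/* Fails when the bitmap requests link modes with no legacy u32 bit. */
	if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
						     cmd->link_modes.advertising))
		return -EINVAL;

	return foo_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
			     cmd->base.duplex, advertising);
}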
index 0653b70723a34f973d825d991b83009e7ccab78b..6d6fb8cf3e7c84a28b2de68fd25bc5b575df7659 100644 (file)
@@ -1983,9 +1983,9 @@ err_out:
        return err;
 }
 
-static void ofdpa_fdb_cleanup(unsigned long data)
+static void ofdpa_fdb_cleanup(struct timer_list *t)
 {
-       struct ofdpa *ofdpa = (struct ofdpa *)data;
+       struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
        struct ofdpa_port *ofdpa_port;
        struct ofdpa_fdb_tbl_entry *entry;
        struct hlist_node *tmp;
@@ -2368,8 +2368,7 @@ static int ofdpa_init(struct rocker *rocker)
        hash_init(ofdpa->neigh_tbl);
        spin_lock_init(&ofdpa->neigh_tbl_lock);
 
-       setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
-                   (unsigned long) ofdpa);
+       timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
        mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
 
        ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
index 61cb24810d101194c5285cf27edd963aa488473f..9e6db16af663b5a43c3c1eff26824fab3cb2df6c 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
  *
- * Copyright (C) Alexandre Torgue 2015
- * Author:  Alexandre Torgue <alexandre.torgue@gmail.com>
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author:  Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
  * License terms:  GNU General Public License (GPL), version 2
  *
  */
index ff4fb5eae1af3f0c7f3dcb61492a530a12f19bce..f63c2ddced3c9a1e90f4425b28be95d202379a71 100644 (file)
@@ -345,9 +345,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
  *  if there is no data transfer and if we are not in LPI state,
  *  then MAC Transmitter can be moved to LPI state.
  */
-static void stmmac_eee_ctrl_timer(unsigned long arg)
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+       struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
 
        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
@@ -401,9 +401,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
-                       setup_timer(&priv->eee_ctrl_timer,
-                                   stmmac_eee_ctrl_timer,
-                                   (unsigned long)priv);
+                       timer_setup(&priv->eee_ctrl_timer,
+                                   stmmac_eee_ctrl_timer, 0);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));
 
@@ -2221,9 +2220,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
  * Description:
  * This is the timer handler to directly invoke the stmmac_tx_clean.
  */
-static void stmmac_tx_timer(unsigned long data)
+static void stmmac_tx_timer(struct timer_list *t)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)data;
+       struct stmmac_priv *priv = from_timer(priv, t, txtimer);
        u32 tx_queues_count = priv->plat->tx_queues_to_use;
        u32 queue;
 
@@ -2244,7 +2243,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 {
        priv->tx_coal_frames = STMMAC_TX_FRAMES;
        priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
-       setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv);
+       timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
        priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
        add_timer(&priv->txtimer);
 }
index e1b55b8fb8e0917d6fa8537ff029e52d29371ca1..1f8e9601592a679025cc1451b9c686c38ac3176f 100644 (file)
@@ -358,9 +358,9 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void xlgmac_tx_timer(unsigned long data)
+static void xlgmac_tx_timer(struct timer_list *t)
 {
-       struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+       struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
        struct xlgmac_pdata *pdata = channel->pdata;
        struct napi_struct *napi;
 
@@ -391,8 +391,7 @@ static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
                if (!channel->tx_ring)
                        break;
 
-               setup_timer(&channel->tx_timer, xlgmac_tx_timer,
-                           (unsigned long)channel);
+               timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
        }
 }
 
index db8a4bcfc6c74d61088e17db166b087b37c19baa..a73600dceb8bc76184e1f4f89ef8bbc3ac8e24b3 100644 (file)
@@ -119,8 +119,8 @@ do {                                                                \
 #define CPDMA_RXCP             0x60
 
 #define CPSW_POLL_WEIGHT       64
-#define CPSW_MIN_PACKET_SIZE   60
-#define CPSW_MAX_PACKET_SIZE   (1500 + 14 + 4 + 4)
+#define CPSW_MIN_PACKET_SIZE   (VLAN_ETH_ZLEN)
+#define CPSW_MAX_PACKET_SIZE   (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
 #define RX_PRIORITY_MAPPING    0x76543210
 #define TX_PRIORITY_MAPPING    0x33221100
index cd1185e6613387e60b74bdd0a75f8f21b230dcf8..b432a75fb874cec3acc680827e4310b495e65111 100644 (file)
@@ -765,9 +765,9 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
 
-static void cpsw_ale_timer(unsigned long arg)
+static void cpsw_ale_timer(struct timer_list *t)
 {
-       struct cpsw_ale *ale = (struct cpsw_ale *)arg;
+       struct cpsw_ale *ale = from_timer(ale, t, timer);
 
        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
 
@@ -859,7 +859,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
        cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
        cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
 
-       setup_timer(&ale->timer, cpsw_ale_timer, (unsigned long)ale);
+       timer_setup(&ale->timer, cpsw_ale_timer, 0);
        if (ale->ageout) {
                ale->timer.expires = jiffies + ale->ageout;
                add_timer(&ale->timer);
index 4ad821655e51cdd467fd614530ea8fe8dd4dfb78..e831c49713eecca836746c076c8bb30b70ca381c 100644 (file)
@@ -2745,9 +2745,9 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
        return -EOPNOTSUPP;
 }
 
-static void netcp_ethss_timer(unsigned long arg)
+static void netcp_ethss_timer(struct timer_list *t)
 {
-       struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
+       struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
        struct gbe_intf *gbe_intf;
        struct gbe_slave *slave;
 
@@ -3616,8 +3616,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        }
        spin_unlock_bh(&gbe_dev->hw_stats_lock);
 
-       setup_timer(&gbe_dev->timer, netcp_ethss_timer,
-                   (unsigned long)gbe_dev);
+       timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
        gbe_dev->timer.expires   = jiffies + GBE_TIMER_INTERVAL;
        add_timer(&gbe_dev->timer);
        *inst_priv = gbe_dev;
index 8f53d762fbc405cc68325ca8cb5cb8d1ee8ae9c3..5a4e78fde530ad4ac0984975b950eafa1cb0fde5 100644 (file)
@@ -254,7 +254,7 @@ tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
                        spin_unlock_irqrestore(&priv->lock, flags);
                return;
        }
-       priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+       priv->timer.function = tlan_timer;
        if (!in_irq())
                spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1425,7 +1425,7 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
                tlan_dio_write8(dev->base_addr,
                                TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
                if (priv->timer.function == NULL) {
-                       priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+                       priv->timer.function = tlan_timer;
                        priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
                        priv->timer_set_at = jiffies;
                        priv->timer_type = TLAN_TIMER_ACTIVITY;
@@ -1576,7 +1576,7 @@ drop_and_reuse:
                tlan_dio_write8(dev->base_addr,
                                TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
                if (priv->timer.function == NULL)  {
-                       priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+                       priv->timer.function = tlan_timer;
                        priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
                        priv->timer_set_at = jiffies;
                        priv->timer_type = TLAN_TIMER_ACTIVITY;
index a913538d32131d81dc7f92b253ae0184e31b474a..d925b8203996691f1c380d70ff28de9f9c0b0a49 100644 (file)
@@ -912,8 +912,9 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
  * packets, including updating the queue tail pointer.
  */
 static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
+spider_net_cleanup_tx_ring(struct timer_list *t)
 {
+       struct spider_net_card *card = from_timer(card, t, tx_timer);
        if ((spider_net_release_tx_chain(card, 0) != 0) &&
            (card->netdev->flags & IFF_UP)) {
                spider_net_kick_tx_dma(card);
@@ -1265,7 +1266,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
        spider_net_refill_rx_chain(card);
        spider_net_enable_rxdmac(card);
 
-       spider_net_cleanup_tx_ring(card);
+       spider_net_cleanup_tx_ring(&card->tx_timer);
 
        /* if all packets are in the stack, enable interrupts and return 0 */
        /* if not, return 1 */
@@ -1977,9 +1978,9 @@ init_firmware_failed:
  * @data: used for pointer to card structure
  *
  */
-static void spider_net_link_phy(unsigned long data)
+static void spider_net_link_phy(struct timer_list *t)
 {
-       struct spider_net_card *card = (struct spider_net_card *)data;
+       struct spider_net_card *card = from_timer(card, t, aneg_timer);
        struct mii_phy *phy = &card->phy;
 
        /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
@@ -2256,14 +2257,11 @@ spider_net_setup_netdev(struct spider_net_card *card)
 
        pci_set_drvdata(card->pdev, netdev);
 
-       setup_timer(&card->tx_timer,
-                   (void(*)(unsigned long))spider_net_cleanup_tx_ring,
-                   (unsigned long)card);
+       timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
        netdev->irq = card->pdev->irq;
 
        card->aneg_count = 0;
-       setup_timer(&card->aneg_timer, spider_net_link_phy,
-                   (unsigned long)card);
+       timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
 
        netif_napi_add(netdev, &card->napi,
                       spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
index 83e6f76eb9654ee2c0ed3ab9a97b91e3cba7ffb5..33949248c829e1d7ab16c5d11a6ca4208efdf43c 100644 (file)
@@ -995,8 +995,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
        else
                name = "Rhine III";
 
-       netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-                   name, (long)ioaddr, dev->dev_addr, rp->irq);
+       netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
+                   name, ioaddr, dev->dev_addr, rp->irq);
 
        dev_set_drvdata(hwdev, dev);
 
index 6d68c8a8f4f2ac7f732ff6cfe862c71ae2460a27..da4ec575ccf9ba4aede92c0fd3a4842ed4257449 100644 (file)
@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
 config XILINX_LL_TEMAC
        tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
        depends on (PPC || MICROBLAZE)
+       depends on !64BIT || BROKEN
        select PHYLIB
        ---help---
          This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
index 4e16d839c3111cf8b3f6c8e6824a56defb2f9633..b718a02a6bb6055d7841619b42cb400d1eea0984 100644 (file)
@@ -1337,21 +1337,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
        }
 
        if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
+#if IS_ENABLED(CONFIG_IPV6)
                if (changelink) {
                        attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
                        goto change_notsup;
                }
                if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
                        info->key.tun_flags &= ~TUNNEL_CSUM;
+#else
+               NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
+                                   "IPv6 support not enabled in the kernel");
+               return -EPFNOSUPPORT;
+#endif
        }
 
        if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
+#if IS_ENABLED(CONFIG_IPV6)
                if (changelink) {
                        attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
                        goto change_notsup;
                }
                if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
                        *use_udp6_rx_checksums = false;
+#else
+               NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
+                                   "IPv6 support not enabled in the kernel");
+               return -EPFNOSUPPORT;
+#endif
        }
 
        return 0;
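The geneve hunk above uses the preprocessor form, #if IS_ENABLED(CONFIG_IPV6), because the guarded branches touch attributes and a use_udp6_rx_checksums field that only exist when IPv6 is built in. Where the guarded code compiles either way, the runtime form keeps both branches visible to the compiler. A hedged sketch of that alternative with a hypothetical helper (not part of this commit):

#include <linux/kconfig.h>
#include <linux/errno.h>

/* Hypothetical check: refuse an IPv6-only option on IPv4-only kernels. */
static int foo_check_v6_option(bool requested)
{
	if (requested && !IS_ENABLED(CONFIG_IPV6))
		return -EPFNOSUPPORT;	/* mirrors the geneve error code above */

	return 0;
}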
@@ -1527,11 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
                goto nla_put_failure;
 
        if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
-                       goto nla_put_failure;
+               goto nla_put_failure;
 
+#if IS_ENABLED(CONFIG_IPV6)
        if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
                       !geneve->use_udp6_rx_checksums))
                goto nla_put_failure;
+#endif
 
        return 0;
 
index c9f7215c5dc234599951e2f20a418a7f4c68ff55..3de2729590905328f13ca9c22bb6e8178cd4ac29 100644 (file)
@@ -1005,7 +1005,7 @@ static void __scc_start_tx_timer(struct scc_channel *scc,
        } else 
        if (when != TIMER_OFF)
        {
-               scc->tx_t.function = (TIMER_FUNC_TYPE)handler;
+               scc->tx_t.function = handler;
                scc->tx_t.expires = jiffies + (when*HZ)/100;
                add_timer(&scc->tx_t);
        }
@@ -1031,7 +1031,7 @@ static void scc_start_defer(struct scc_channel *scc)
        
        if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
        {
-               scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_busy;
+               scc->tx_wdog.function = t_busy;
                scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
                add_timer(&scc->tx_wdog);
        }
@@ -1047,7 +1047,7 @@ static void scc_start_maxkeyup(struct scc_channel *scc)
        
        if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
        {
-               scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_maxkeyup;
+               scc->tx_wdog.function = t_maxkeyup;
                scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
                add_timer(&scc->tx_wdog);
        }
@@ -1428,7 +1428,7 @@ scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern
 
        del_timer(&scc->tx_wdog);
 
-       scc->tx_wdog.function = (TIMER_FUNC_TYPE)scc_stop_calibrate;
+       scc->tx_wdog.function = scc_stop_calibrate;
        scc->tx_wdog.expires = jiffies + HZ*duration;
        add_timer(&scc->tx_wdog);
 
index 4958bb6b737672405f60abbfc830e202f63f1c42..88ddfb92122b0e15a7b031a79c4ec8078af79cd9 100644 (file)
@@ -646,6 +646,10 @@ struct nvsp_message {
 #define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 #define NETVSC_SEND_BUFFER_ID                  0
 
+#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \
+                                     NETIF_F_TSO | NETIF_F_IPV6_CSUM | \
+                                     NETIF_F_TSO6)
+
 #define VRSS_SEND_TAB_SIZE 16  /* must be power of 2 */
 #define VRSS_CHANNEL_MAX 64
 #define VRSS_CHANNEL_DEFAULT 8
index da216ca4f2b231b2483a47c0fc249e4b0c529831..5129647d420ca2e9b91bc782f9b8920c573a5c7c 100644 (file)
@@ -2011,7 +2011,7 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
-       /* hw_features computed in rndis_filter_device_add */
+       /* hw_features computed in rndis_netdev_set_hwcaps() */
        net->features = net->hw_features |
                NETIF_F_HIGHDMA | NETIF_F_SG |
                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
index 8b1242b8d8ef98b199304827d2425a447f646f32..7b637c7dd1e5fe726722779ed60434aab49a09d1 100644 (file)
@@ -1131,69 +1131,20 @@ unlock:
        rtnl_unlock();
 }
 
-struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
-                                     struct netvsc_device_info *device_info)
+static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
+                                  struct netvsc_device *nvdev)
 {
-       struct net_device *net = hv_get_drvdata(dev);
+       struct net_device *net = rndis_device->ndev;
        struct net_device_context *net_device_ctx = netdev_priv(net);
-       struct netvsc_device *net_device;
-       struct rndis_device *rndis_device;
        struct ndis_offload hwcaps;
        struct ndis_offload_params offloads;
-       struct ndis_recv_scale_cap rsscap;
-       u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
        unsigned int gso_max_size = GSO_MAX_SIZE;
-       u32 mtu, size;
-       const struct cpumask *node_cpu_mask;
-       u32 num_possible_rss_qs;
-       int i, ret;
-
-       rndis_device = get_rndis_device();
-       if (!rndis_device)
-               return ERR_PTR(-ENODEV);
-
-       /*
-        * Let the inner driver handle this first to create the netvsc channel
-        * NOTE! Once the channel is created, we may get a receive callback
-        * (RndisFilterOnReceive()) before this call is completed
-        */
-       net_device = netvsc_device_add(dev, device_info);
-       if (IS_ERR(net_device)) {
-               kfree(rndis_device);
-               return net_device;
-       }
-
-       /* Initialize the rndis device */
-       net_device->max_chn = 1;
-       net_device->num_chn = 1;
-
-       net_device->extension = rndis_device;
-       rndis_device->ndev = net;
-
-       /* Send the rndis initialization message */
-       ret = rndis_filter_init_device(rndis_device, net_device);
-       if (ret != 0)
-               goto err_dev_remv;
-
-       /* Get the MTU from the host */
-       size = sizeof(u32);
-       ret = rndis_filter_query_device(rndis_device, net_device,
-                                       RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
-                                       &mtu, &size);
-       if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
-               net->mtu = mtu;
-
-       /* Get the mac address */
-       ret = rndis_filter_query_device_mac(rndis_device, net_device);
-       if (ret != 0)
-               goto err_dev_remv;
-
-       memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+       int ret;
 
        /* Find HW offload capabilities */
-       ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps);
+       ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
        if (ret != 0)
-               goto err_dev_remv;
+               return ret;
 
        /* A value of zero means "no change"; now turn on what we want. */
        memset(&offloads, 0, sizeof(struct ndis_offload_params));
@@ -1201,8 +1152,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
        /* Linux does not care about IP checksum, always does in kernel */
        offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
 
+       /* Reset previously set hw_features flags */
+       net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
+       net_device_ctx->tx_checksum_mask = 0;
+
        /* Compute tx offload settings based on hw capabilities */
-       net->hw_features = NETIF_F_RXCSUM;
+       net->hw_features |= NETIF_F_RXCSUM;
 
        if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
                /* Can checksum TCP */
@@ -1246,10 +1201,75 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                }
        }
 
+       /* In case some hw_features disappeared we need to remove them from
+        * net->features list as they're no longer supported.
+        */
+       net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
+
        netif_set_gso_max_size(net, gso_max_size);
 
-       ret = rndis_filter_set_offload_params(net, net_device, &offloads);
-       if (ret)
+       ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
+
+       return ret;
+}
+
+struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
+                                     struct netvsc_device_info *device_info)
+{
+       struct net_device *net = hv_get_drvdata(dev);
+       struct netvsc_device *net_device;
+       struct rndis_device *rndis_device;
+       struct ndis_recv_scale_cap rsscap;
+       u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+       u32 mtu, size;
+       const struct cpumask *node_cpu_mask;
+       u32 num_possible_rss_qs;
+       int i, ret;
+
+       rndis_device = get_rndis_device();
+       if (!rndis_device)
+               return ERR_PTR(-ENODEV);
+
+       /* Let the inner driver handle this first to create the netvsc channel
+        * NOTE! Once the channel is created, we may get a receive callback
+        * (RndisFilterOnReceive()) before this call is completed
+        */
+       net_device = netvsc_device_add(dev, device_info);
+       if (IS_ERR(net_device)) {
+               kfree(rndis_device);
+               return net_device;
+       }
+
+       /* Initialize the rndis device */
+       net_device->max_chn = 1;
+       net_device->num_chn = 1;
+
+       net_device->extension = rndis_device;
+       rndis_device->ndev = net;
+
+       /* Send the rndis initialization message */
+       ret = rndis_filter_init_device(rndis_device, net_device);
+       if (ret != 0)
+               goto err_dev_remv;
+
+       /* Get the MTU from the host */
+       size = sizeof(u32);
+       ret = rndis_filter_query_device(rndis_device, net_device,
+                                       RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
+                                       &mtu, &size);
+       if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
+               net->mtu = mtu;
+
+       /* Get the mac address */
+       ret = rndis_filter_query_device_mac(rndis_device, net_device);
+       if (ret != 0)
+               goto err_dev_remv;
+
+       memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+
+       /* Query and set hardware capabilities */
+       ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
+       if (ret != 0)
                goto err_dev_remv;
 
        rndis_filter_query_device_link_status(rndis_device, net_device);
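The refactor above lets the offload settings be recomputed when the device is reconfigured: rndis_netdev_set_hwcaps() first clears every flag the driver may have set on a previous pass, then rebuilds hw_features from what the host currently reports, and finally drops active features that are no longer offered. A minimal sketch of that reset-then-requery idiom with hypothetical names (FOO_*, foo_refresh_features):

#include <linux/netdevice.h>

#define FOO_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_TSO)

static void foo_refresh_features(struct net_device *net, netdev_features_t caps)
{
	/* Drop everything this driver may have enabled on an earlier pass... */
	net->hw_features &= ~FOO_SUPPORTED_HW_FEATURES;
	/* ...re-add only what the (possibly changed) backend offers now... */
	net->hw_features |= caps & FOO_SUPPORTED_HW_FEATURES;
	/* ...and clear active features that are no longer supported.
	 * '~' and '|' bind before '&=', so this is features & (~MASK | hw).
	 */
	net->features &= ~FOO_SUPPORTED_HW_FEATURES | net->hw_features;
}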
index f2a7e929316e12e86e680468f44a36612d69f5df..11c1e7950fe58002b1b2b52e6af395dbfc7b6863 100644 (file)
@@ -116,7 +116,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
        return false;
 }
 
-static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
+static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
 {
        void *lyr3h = NULL;
 
@@ -124,7 +124,7 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
        case htons(ETH_P_ARP): {
                struct arphdr *arph;
 
-               if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
+               if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
                        return NULL;
 
                arph = arp_hdr(skb);
@@ -165,8 +165,26 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
                /* Only Neighbour Solicitation pkts need different treatment */
                if (ipv6_addr_any(&ip6h->saddr) &&
                    ip6h->nexthdr == NEXTHDR_ICMP) {
+                       struct icmp6hdr *icmph;
+
+                       if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
+                               return NULL;
+
+                       ip6h = ipv6_hdr(skb);
+                       icmph = (struct icmp6hdr *)(ip6h + 1);
+
+                       if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+                               /* Need to access the ipv6 address in body */
+                               if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
+                                               + sizeof(struct in6_addr))))
+                                       return NULL;
+
+                               ip6h = ipv6_hdr(skb);
+                               icmph = (struct icmp6hdr *)(ip6h + 1);
+                       }
+
                        *type = IPVL_ICMPV6;
-                       lyr3h = ip6h + 1;
+                       lyr3h = icmph;
                }
                break;
        }
@@ -510,7 +528,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
        struct ipvl_addr *addr;
        int addr_type;
 
-       lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+       lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
        if (!lyr3h)
                goto out;
 
@@ -539,7 +557,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 
        if (!ipvlan_is_vepa(ipvlan->port) &&
            ether_addr_equal(eth->h_dest, eth->h_source)) {
-               lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+               lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
                if (lyr3h) {
                        addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
                        if (addr) {
@@ -606,7 +624,7 @@ static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
        int addr_type;
 
        if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
-               lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+               lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
                if (!lyr3h)
                        return true;
 
@@ -627,7 +645,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
        struct sk_buff *skb = *pskb;
        rx_handler_result_t ret = RX_HANDLER_PASS;
 
-       lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+       lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
        if (!lyr3h)
                goto out;
 
@@ -666,7 +684,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
        } else {
                struct ipvl_addr *addr;
 
-               lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+               lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
                if (!lyr3h)
                        return ret;
 
@@ -717,7 +735,7 @@ static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
        if (!port || port->mode != IPVLAN_MODE_L3S)
                goto out;
 
-       lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+       lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
        if (!lyr3h)
                goto out;
 
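The ipvlan_core.c hunks above tighten the L3 header probing: before reading an ARP or neighbour-solicitation payload the code pulls the full header into the linear area with pskb_may_pull() and then re-reads the header pointer, because a successful pull may reallocate skb->head. A minimal, hedged sketch of that idiom follows; the helper name is illustrative and not part of the patch:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/if_arp.h>

    /* Pull a complete ARP header into the linear area, or return NULL if the
     * packet is too short.  arp_hdr() is evaluated only after the pull because
     * pskb_may_pull() may move skb->data.
     */
    static struct arphdr *example_pull_arp(struct sk_buff *skb,
                                           struct net_device *dev)
    {
            if (unlikely(!pskb_may_pull(skb, arp_hdr_len(dev))))
                    return NULL;

            return arp_hdr(skb);
    }
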
index a266aa435d4d6b267e47422555c694ce6c96b478..30cb803e2fe5b31d8fc056b88754e7996eee4955 100644 (file)
@@ -107,16 +107,6 @@ static int ipvlan_port_create(struct net_device *dev)
        struct ipvl_port *port;
        int err, idx;
 
-       if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
-               netdev_err(dev, "Master is either lo or non-ether device\n");
-               return -EINVAL;
-       }
-
-       if (netdev_is_rx_handler_busy(dev)) {
-               netdev_err(dev, "Device is already in use.\n");
-               return -EBUSY;
-       }
-
        port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;
@@ -179,8 +169,9 @@ static void ipvlan_port_destroy(struct net_device *dev)
 static int ipvlan_init(struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
-       const struct net_device *phy_dev = ipvlan->phy_dev;
-       struct ipvl_port *port = ipvlan->port;
+       struct net_device *phy_dev = ipvlan->phy_dev;
+       struct ipvl_port *port;
+       int err;
 
        dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
                     (phy_dev->state & IPVLAN_STATE_MASK);
@@ -196,18 +187,27 @@ static int ipvlan_init(struct net_device *dev)
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;
 
+       if (!netif_is_ipvlan_port(phy_dev)) {
+               err = ipvlan_port_create(phy_dev);
+               if (err < 0) {
+                       free_percpu(ipvlan->pcpu_stats);
+                       return err;
+               }
+       }
+       port = ipvlan_port_get_rtnl(phy_dev);
        port->count += 1;
-
        return 0;
 }
 
 static void ipvlan_uninit(struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
-       struct ipvl_port *port = ipvlan->port;
+       struct net_device *phy_dev = ipvlan->phy_dev;
+       struct ipvl_port *port;
 
        free_percpu(ipvlan->pcpu_stats);
 
+       port = ipvlan_port_get_rtnl(phy_dev);
        port->count -= 1;
        if (!port->count)
                ipvlan_port_destroy(port->dev);
@@ -554,7 +554,6 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        struct net_device *phy_dev;
        int err;
        u16 mode = IPVLAN_MODE_L3;
-       bool create = false;
 
        if (!tb[IFLA_LINK])
                return -EINVAL;
@@ -568,28 +567,41 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
 
                phy_dev = tmp->phy_dev;
        } else if (!netif_is_ipvlan_port(phy_dev)) {
-               err = ipvlan_port_create(phy_dev);
-               if (err < 0)
-                       return err;
-               create = true;
-       }
+               /* Exit early if the underlying link is invalid or busy */
+               if (phy_dev->type != ARPHRD_ETHER ||
+                   phy_dev->flags & IFF_LOOPBACK) {
+                       netdev_err(phy_dev,
+                                  "Master is either lo or non-ether device\n");
+                       return -EINVAL;
+               }
 
-       if (data && data[IFLA_IPVLAN_MODE])
-               mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+               if (netdev_is_rx_handler_busy(phy_dev)) {
+                       netdev_err(phy_dev, "Device is already in use.\n");
+                       return -EBUSY;
+               }
+       }
 
-       port = ipvlan_port_get_rtnl(phy_dev);
        ipvlan->phy_dev = phy_dev;
        ipvlan->dev = dev;
-       ipvlan->port = port;
        ipvlan->sfeatures = IPVLAN_FEATURES;
        ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
 
-       /* Flags are per port and latest update overrides. User has
-        * to be consistent in setting it just like the mode attribute.
+       /* TODO Probably put random address here to be presented to the
+        * world but keep using the physical-dev address for the outgoing
+        * packets.
         */
-       if (data && data[IFLA_IPVLAN_FLAGS])
-               ipvlan->port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
+       memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+
+       dev->priv_flags |= IFF_IPVLAN_SLAVE;
+
+       err = register_netdevice(dev);
+       if (err < 0)
+               return err;
+
+       /* ipvlan_init() would have created the port, if required */
+       port = ipvlan_port_get_rtnl(phy_dev);
+       ipvlan->port = port;
 
        /* If the port-id base is at the MAX value, then wrap it around and
         * begin from 0x1 again. This may be due to a busy system where lots
@@ -609,31 +621,28 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
                err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
                                     GFP_KERNEL);
        if (err < 0)
-               goto destroy_ipvlan_port;
+               goto unregister_netdev;
        dev->dev_id = err;
+
        /* Increment id-base to the next slot for the future assignment */
        port->dev_id_start = err + 1;
 
-       /* TODO Probably put random address here to be presented to the
-        * world but keep using the physical-dev address for the outgoing
-        * packets.
-        */
-       memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+       err = netdev_upper_dev_link(phy_dev, dev, extack);
+       if (err)
+               goto remove_ida;
 
-       dev->priv_flags |= IFF_IPVLAN_SLAVE;
+       /* Flags are per port and latest update overrides. User has
+        * to be consistent in setting it just like the mode attribute.
+        */
+       if (data && data[IFLA_IPVLAN_FLAGS])
+               port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
 
-       err = register_netdevice(dev);
-       if (err < 0)
-               goto remove_ida;
+       if (data && data[IFLA_IPVLAN_MODE])
+               mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
 
-       err = netdev_upper_dev_link(phy_dev, dev, extack);
-       if (err) {
-               goto unregister_netdev;
-       }
        err = ipvlan_set_port_mode(port, mode);
-       if (err) {
+       if (err)
                goto unlink_netdev;
-       }
 
        list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
        netif_stacked_transfer_operstate(phy_dev, dev);
@@ -641,13 +650,10 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
 
 unlink_netdev:
        netdev_upper_dev_unlink(phy_dev, dev);
-unregister_netdev:
-       unregister_netdevice(dev);
 remove_ida:
        ida_simple_remove(&port->ida, dev->dev_id);
-destroy_ipvlan_port:
-       if (create)
-               ipvlan_port_destroy(phy_dev);
+unregister_netdev:
+       unregister_netdevice(dev);
        return err;
 }
 EXPORT_SYMBOL_GPL(ipvlan_link_new);
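
The reworked ipvlan_link_new() above also reorders its error ladder so the unwind labels run in the reverse order of setup (unlink the upper device, release the ida slot, then unregister the netdev). A hedged, generic sketch of that goto-ladder idiom, with placeholder step functions that are not part of the driver:

    static int example_setup(struct example *e)
    {
            int err;

            err = step_a(e);
            if (err)
                    return err;

            err = step_b(e);
            if (err)
                    goto undo_a;

            err = step_c(e);
            if (err)
                    goto undo_b;

            return 0;

    undo_b:
            undo_step_b(e);         /* unwind strictly in reverse order of setup */
    undo_a:
            undo_step_a(e);
            return err;
    }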
index 2c98152d1e1bf76d247ef8ad0b0c8ae156907cf0..1d025ab9568f216172d6fd1049056b6187ab7c40 100644 (file)
@@ -2411,7 +2411,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
        if (!hdr)
                return -EMSGSIZE;
 
-       genl_dump_check_consistent(cb, hdr, &macsec_fam);
+       genl_dump_check_consistent(cb, hdr);
 
        if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
                goto nla_put_failure;
index 72f4228a63bb0a9fb0f3baa993fa85fc8ad6f823..9442db2218348713e87faf91ccc25f4a8444526a 100644 (file)
@@ -116,3 +116,7 @@ static struct mdio_device_id __maybe_unused cortina_tbl[] = {
 };
 
 MODULE_DEVICE_TABLE(mdio, cortina_tbl);
+
+MODULE_DESCRIPTION("Cortina EDC CDR 10G Ethernet PHY driver");
+MODULE_AUTHOR("NXP");
+MODULE_LICENSE("GPL");
index aebc08beceba33aef3bd1440eeb0f589e0150cff..21b3f36e023a962fdabbeac10402025f4acde82c 100644 (file)
@@ -16,6 +16,7 @@
  * link takes priority and the other port is completely locked out.
  */
 #include <linux/phy.h>
+#include <linux/marvell_phy.h>
 
 enum {
        MV_PCS_BASE_T           = 0x0000,
@@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)
 static struct phy_driver mv3310_drivers[] = {
        {
                .phy_id         = 0x002b09aa,
-               .phy_id_mask    = 0xffffffff,
+               .phy_id_mask    = MARVELL_PHY_ID_MASK,
                .name           = "mv88x3310",
                .features       = SUPPORTED_10baseT_Full |
                                  SUPPORTED_100baseT_Full |
@@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {
 module_phy_driver(mv3310_drivers);
 
 static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
-       { 0x002b09aa, 0xffffffff },
+       { 0x002b09aa, MARVELL_PHY_ID_MASK },
        { },
 };
 MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
index eb8a18991d8c7c78c45ba828b79479263acbf1a9..cc63102ca96e0056ac8f1755c5b43b7bd878ccf0 100644 (file)
@@ -106,8 +106,8 @@ static int slip_esc6(unsigned char *p, unsigned char *d, int len);
 static void slip_unesc6(struct slip *sl, unsigned char c);
 #endif
 #ifdef CONFIG_SLIP_SMART
-static void sl_keepalive(unsigned long sls);
-static void sl_outfill(unsigned long sls);
+static void sl_keepalive(struct timer_list *t);
+static void sl_outfill(struct timer_list *t);
 static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 #endif
 
@@ -763,8 +763,8 @@ static struct slip *sl_alloc(dev_t line)
        sl->mode        = SL_MODE_DEFAULT;
 #ifdef CONFIG_SLIP_SMART
        /* initialize timer_list struct */
-       setup_timer(&sl->keepalive_timer, sl_keepalive, (unsigned long)sl);
-       setup_timer(&sl->outfill_timer, sl_outfill, (unsigned long)sl);
+       timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
+       timer_setup(&sl->outfill_timer, sl_outfill, 0);
 #endif
        slip_devs[i] = dev;
        return sl;
@@ -1388,9 +1388,9 @@ module_exit(slip_exit);
  * added by Stanislav Voronyi. All changes before marked VSV
  */
 
-static void sl_outfill(unsigned long sls)
+static void sl_outfill(struct timer_list *t)
 {
-       struct slip *sl = (struct slip *)sls;
+       struct slip *sl = from_timer(sl, t, outfill_timer);
 
        spin_lock(&sl->lock);
 
@@ -1419,9 +1419,9 @@ out:
        spin_unlock(&sl->lock);
 }
 
-static void sl_keepalive(unsigned long sls)
+static void sl_keepalive(struct timer_list *t)
 {
-       struct slip *sl = (struct slip *)sls;
+       struct slip *sl = from_timer(sl, t, keepalive_timer);
 
        spin_lock(&sl->lock);
 
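The slip.c hunks are one instance of the tree-wide timer API conversion that recurs in this merge (slip, tun, hdlc_ppp, brcmfmac, iwlwifi): callbacks now take a struct timer_list * and recover their container with from_timer(), and setup_timer() with a cast data argument becomes timer_setup(). A hedged, self-contained sketch of the pattern with an illustrative structure, not taken from any of the patched drivers:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct example_dev {
            struct timer_list gc_timer;
            unsigned long interval;
    };

    /* New-style callback: the timer pointer maps back to its container. */
    static void example_gc(struct timer_list *t)
    {
            struct example_dev *ed = from_timer(ed, t, gc_timer);

            /* ... do the periodic work ... */
            mod_timer(&ed->gc_timer, jiffies + ed->interval);
    }

    static void example_init(struct example_dev *ed)
    {
            /* Replaces setup_timer(&ed->gc_timer, example_gc, (unsigned long)ed). */
            timer_setup(&ed->gc_timer, example_gc, 0);
            mod_timer(&ed->gc_timer, jiffies + ed->interval);
    }
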
index b13890953ebb92515b3924f511714942a912b120..e9489b88407ce1677385fe480592958b57d02c8d 100644 (file)
@@ -1077,7 +1077,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-                           TUN_F_TSO_ECN))
+                           TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;
 
                rtnl_lock();
index 228d4aa6d9ae3e08d688296cc4d8e81e6dcde81e..ca5e375de27c131534ce1bd2768b787be2fb908f 100644 (file)
@@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
                if (ring->ring->is_tx) {
                        dir = DMA_TO_DEVICE;
                        order = 0;
-                       size = tbnet_frame_size(tf);
+                       size = TBNET_FRAME_SIZE;
                } else {
                        dir = DMA_FROM_DEVICE;
                        order = TBNET_RX_PAGE_ORDER;
@@ -512,6 +512,7 @@ err_free:
 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 {
        struct tbnet_ring *ring = &net->tx_ring;
+       struct device *dma_dev = tb_ring_dma_device(ring->ring);
        struct tbnet_frame *tf;
        unsigned int index;
 
@@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 
        tf = &ring->frames[index];
        tf->frame.size = 0;
-       tf->frame.buffer_phy = 0;
+
+       dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
+                               tbnet_frame_size(tf), DMA_TO_DEVICE);
 
        return tf;
 }
@@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                              bool canceled)
 {
        struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
-       struct device *dma_dev = tb_ring_dma_device(ring);
        struct tbnet *net = netdev_priv(tf->dev);
 
-       dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
-                      DMA_TO_DEVICE);
-       tf->frame.buffer_phy = 0;
-
        /* Return buffer to the ring */
        net->tx_ring.prod++;
 
@@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
 static int tbnet_alloc_tx_buffers(struct tbnet *net)
 {
        struct tbnet_ring *ring = &net->tx_ring;
+       struct device *dma_dev = tb_ring_dma_device(ring->ring);
        unsigned int i;
 
        for (i = 0; i < TBNET_RING_SIZE; i++) {
                struct tbnet_frame *tf = &ring->frames[i];
+               dma_addr_t dma_addr;
 
                tf->page = alloc_page(GFP_KERNEL);
                if (!tf->page) {
@@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
                        return -ENOMEM;
                }
 
+               dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
+                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(dma_dev, dma_addr)) {
+                       __free_page(tf->page);
+                       tf->page = NULL;
+                       tbnet_free_buffers(ring);
+                       return -ENOMEM;
+               }
+
                tf->dev = net->dev;
+               tf->frame.buffer_phy = dma_addr;
                tf->frame.callback = tbnet_tx_callback;
                tf->frame.sof = TBIP_PDF_FRAME_START;
                tf->frame.eof = TBIP_PDF_FRAME_END;
@@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev)
        return 0;
 }
 
-static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
-{
-       dma_addr_t dma_addr;
-
-       dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
-                               DMA_TO_DEVICE);
-       if (dma_mapping_error(dma_dev, dma_addr))
-               return false;
-
-       tf->frame.buffer_phy = dma_addr;
-       return true;
-}
-
 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
        struct tbnet_frame **frames, u32 frame_count)
 {
@@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                /* No need to calculate checksum so we just update the
-                * total frame count and map the frames for DMA.
+                * total frame count and sync the frames for DMA.
                 */
                for (i = 0; i < frame_count; i++) {
                        hdr = page_address(frames[i]->page);
                        hdr->frame_count = cpu_to_le32(frame_count);
-                       if (!tbnet_xmit_map(dma_dev, frames[i]))
-                               goto err_unmap;
+                       dma_sync_single_for_device(dma_dev,
+                               frames[i]->frame.buffer_phy,
+                               tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
                }
 
                return true;
@@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
        *tucso = csum_fold(wsum);
 
        /* Checksum is finally calculated and we don't touch the memory
-        * anymore, so DMA map the frames now.
+        * anymore, so DMA sync the frames now.
         */
        for (i = 0; i < frame_count; i++) {
-               if (!tbnet_xmit_map(dma_dev, frames[i]))
-                       goto err_unmap;
+               dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
+                       tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
        }
 
        return true;
-
-err_unmap:
-       while (i--)
-               dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
-                              tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
-
-       return false;
 }
 
 static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
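
The thunderbolt-net hunks above move Tx buffers from a map-per-transmit model to mapping each page once when the ring is allocated and then only transferring ownership with dma_sync_single_for_cpu()/_for_device() around each use. A hedged, generic sketch of that streaming-DMA pattern; the structure and function names are illustrative, not the driver's:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    struct example_tx_buf {
            struct page *page;
            dma_addr_t dma;
    };

    /* Map once, at ring setup time. */
    static int example_buf_alloc(struct device *dma_dev, struct example_tx_buf *b)
    {
            b->page = alloc_page(GFP_KERNEL);
            if (!b->page)
                    return -ENOMEM;

            b->dma = dma_map_page(dma_dev, b->page, 0, PAGE_SIZE, DMA_TO_DEVICE);
            if (dma_mapping_error(dma_dev, b->dma)) {
                    __free_page(b->page);
                    b->page = NULL;
                    return -ENOMEM;
            }
            return 0;
    }

    /* Per transmit: give the buffer to the CPU, fill it, hand it back to the device. */
    static void example_buf_fill(struct device *dma_dev, struct example_tx_buf *b,
                                 const void *data, size_t len)
    {
            dma_sync_single_for_cpu(dma_dev, b->dma, PAGE_SIZE, DMA_TO_DEVICE);
            memcpy(page_address(b->page), data, len);
            dma_sync_single_for_device(dma_dev, b->dma, PAGE_SIZE, DMA_TO_DEVICE);
            /* ... queue b->dma and len to the hardware ring ... */
    }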
index 6bb1e604aadd68b6060df277491478899ccc7e83..95749006d687b971a49894c903fcc611bc25c375 100644 (file)
@@ -444,9 +444,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
        spin_unlock_bh(&tun->lock);
 }
 
-static void tun_flow_cleanup(unsigned long data)
+static void tun_flow_cleanup(struct timer_list *t)
 {
-       struct tun_struct *tun = (struct tun_struct *)data;
+       struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
        unsigned long delay = tun->ageing_time;
        unsigned long next_timer = jiffies + delay;
        unsigned long count = 0;
@@ -1196,7 +1196,9 @@ static void tun_flow_init(struct tun_struct *tun)
                INIT_HLIST_HEAD(&tun->flows[i]);
 
        tun->ageing_time = TUN_FLOW_EXPIRE;
-       setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+       timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
+       mod_timer(&tun->flow_gc_timer,
+                 round_jiffies_up(jiffies + tun->ageing_time));
 }
 
 static void tun_flow_uninit(struct tun_struct *tun)
@@ -1485,6 +1487,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                        err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
                        if (err)
                                goto err_redirect;
+                       rcu_read_unlock();
                        return NULL;
                case XDP_TX:
                        xdp_xmit = true;
@@ -1517,7 +1520,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
        if (xdp_xmit) {
                skb->dev = tun->dev;
                generic_xdp_tx(skb, xdp_prog);
-               rcu_read_lock();
+               rcu_read_unlock();
                return NULL;
        }
 
@@ -2369,6 +2372,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                                features |= NETIF_F_TSO6;
                        arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
                }
+
+               arg &= ~TUN_F_UFO;
        }
 
        /* This gives the user a way to test for new features in future by
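
The tun.c hunks above fix two early returns in the XDP build path that left rcu_read_lock() held: the XDP_REDIRECT return and the generic-XDP transmit return now unlock first. A hedged minimal sketch of the invariant; example_fast_path_consumed() is a hypothetical predicate, not a driver function:

    #include <linux/rcupdate.h>
    #include <linux/skbuff.h>

    static struct sk_buff *example_rx(struct sk_buff *skb)
    {
            rcu_read_lock();

            if (example_fast_path_consumed(skb)) {
                    /* every early return must pair with the rcu_read_lock() above */
                    rcu_read_unlock();
                    return NULL;
            }

            rcu_read_unlock();
            return skb;
    }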
index 42d7edcc3106c95e1215b38074c82297c64298d9..981c931a7a1fd9d4ceedfd7d9ed266af1c970264 100644 (file)
@@ -76,7 +76,6 @@
 
 #define MOD_AUTHOR                     "Option Wireless"
 #define MOD_DESCRIPTION                        "USB High Speed Option driver"
-#define MOD_LICENSE                    "GPL"
 
 #define HSO_MAX_NET_DEVICES            10
 #define HSO__MAX_MTU                   2048
@@ -3286,7 +3285,7 @@ module_exit(hso_exit);
 
 MODULE_AUTHOR(MOD_AUTHOR);
 MODULE_DESCRIPTION(MOD_DESCRIPTION);
-MODULE_LICENSE(MOD_LICENSE);
+MODULE_LICENSE("GPL");
 
 /* change the debug level (eg: insmod hso.ko debug=0x04) */
 MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");
index ca71f6c0385928a977c70096bbac49ca953b5d2d..7275761a1177ca9cda569bfc734bb6de3e1558e1 100644 (file)
@@ -291,12 +291,15 @@ static void ipheth_sndbulk_callback(struct urb *urb)
 
 static int ipheth_carrier_set(struct ipheth_device *dev)
 {
-       struct usb_device *udev = dev->udev;
+       struct usb_device *udev;
        int retval;
+
        if (!dev)
                return 0;
        if (!dev->confirmed_pairing)
                return 0;
+
+       udev = dev->udev;
        retval = usb_control_msg(udev,
                        usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
                        IPHETH_CMD_CARRIER_CHECK, /* request */
index 720a3a248070ccd9db305d3c26349392448f0027..c750cf7c042b004ecfbbce64aefb3d0f1d512c82 100644 (file)
@@ -1239,6 +1239,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+       {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
diff --git a/drivers/net/virtio_net. b/drivers/net/virtio_net.
deleted file mode 100644 (file)
index e69de29..0000000
index 7ac487031b4bca89b13f6c6fa5312651e1901661..19b9cc51079e75346af766c91786d66eaa92c3f2 100644 (file)
@@ -874,8 +874,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
                              const unsigned char *addr, union vxlan_addr ip,
-                             __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
-                             u16 vid)
+                             __be16 port, __be32 src_vni, __be32 vni,
+                             u32 ifindex, u16 vid)
 {
        struct vxlan_fdb *f;
        struct vxlan_rdst *rd = NULL;
index c7721c729541ea2d5d9d439bb4176b0950e3c16c..afeca6bcdade60a45fb6588cb69e69e32014cc7f 100644 (file)
@@ -558,9 +558,9 @@ out:
        return NET_RX_DROP;
 }
 
-static void ppp_timer(unsigned long arg)
+static void ppp_timer(struct timer_list *t)
 {
-       struct proto *proto = (struct proto *)arg;
+       struct proto *proto = from_timer(proto, t, timer);
        struct ppp *ppp = get_ppp(proto->dev);
        unsigned long flags;
 
@@ -610,7 +610,7 @@ static void ppp_start(struct net_device *dev)
        for (i = 0; i < IDX_COUNT; i++) {
                struct proto *proto = &ppp->protos[i];
                proto->dev = dev;
-               setup_timer(&proto->timer, ppp_timer, (unsigned long)proto);
+               timer_setup(&proto->timer, ppp_timer, 0);
                proto->state = CLOSED;
        }
        ppp->protos[IDX_LCP].pid = PID_LCP;
index 37b1e0d03e31d09a29fda60f5173cfea49e0f76a..90a4ad9a2d081eb582570476a41a55f71e911dd6 100644 (file)
@@ -494,18 +494,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
                             break;
                     }
 
-                    data = kmalloc(xc.len, GFP_KERNEL);
-                    if (!data) {
-                            ret = -ENOMEM;
+                    data = memdup_user(xc.data, xc.len);
+                    if (IS_ERR(data)) {
+                            ret = PTR_ERR(data);
                             break;
                     }
-                    
-                    if(copy_from_user(data, xc.data, xc.len))
-                    {
-                       kfree(data);
-                       ret = -ENOMEM;
-                       break;
-                    }
 
                     printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
 
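The lmc ioctl hunk above replaces an open-coded kmalloc() plus copy_from_user() with memdup_user(), which folds allocation and copy into one call and reports the real error (the old code returned -ENOMEM even when the copy faulted). A hedged before/after sketch of the idiom with an illustrative helper:

    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/string.h>
    #include <linux/err.h>

    /* Open-coded form: allocate, copy, unwind by hand. */
    static void *example_copy_blob_old(const void __user *uptr, size_t len)
    {
            void *buf = kmalloc(len, GFP_KERNEL);

            if (!buf)
                    return ERR_PTR(-ENOMEM);
            if (copy_from_user(buf, uptr, len)) {
                    kfree(buf);
                    return ERR_PTR(-EFAULT);
            }
            return buf;
    }

    /* memdup_user() form: one call, error encoded in the returned pointer. */
    static void *example_copy_blob_new(const void __user *uptr, size_t len)
    {
            return memdup_user(uptr, len);
    }
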
index e31438541ee1defbcd9e19c73cf66a12f07316d3..7d295ee715349d90128330975ebc9d81d5a19235 100644 (file)
@@ -566,18 +566,16 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
 
 #define MICHAEL_MIC_LEN 8
 
-static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
-                                        enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
+                                       enum htt_rx_mpdu_encrypt_type type)
 {
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
-               return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
-               return IEEE80211_WEP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
-               return IEEE80211_TKIP_ICV_LEN;
+               return 0;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
@@ -594,6 +592,31 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
        return 0;
 }
 
+static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
+                                       enum htt_rx_mpdu_encrypt_type type)
+{
+       switch (type) {
+       case HTT_RX_MPDU_ENCRYPT_NONE:
+       case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+       case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+       case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+       case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+               return 0;
+       case HTT_RX_MPDU_ENCRYPT_WEP40:
+       case HTT_RX_MPDU_ENCRYPT_WEP104:
+               return IEEE80211_WEP_ICV_LEN;
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+               return IEEE80211_TKIP_ICV_LEN;
+       case HTT_RX_MPDU_ENCRYPT_WEP128:
+       case HTT_RX_MPDU_ENCRYPT_WAPI:
+               break;
+       }
+
+       ath10k_warn(ar, "unsupported encryption type %d\n", type);
+       return 0;
+}
+
 struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
@@ -1063,25 +1086,27 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
        /* Tail */
        if (status->flag & RX_FLAG_IV_STRIPPED) {
                skb_trim(msdu, msdu->len -
-                        ath10k_htt_rx_crypto_tail_len(ar, enctype));
+                        ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+               skb_trim(msdu, msdu->len -
+                        ath10k_htt_rx_crypto_icv_len(ar, enctype));
        } else {
                /* MIC */
-               if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
-                   enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
-                       skb_trim(msdu, msdu->len - 8);
+               if (status->flag & RX_FLAG_MIC_STRIPPED)
+                       skb_trim(msdu, msdu->len -
+                                ath10k_htt_rx_crypto_mic_len(ar, enctype));
 
                /* ICV */
-               if (status->flag & RX_FLAG_ICV_STRIPPED &&
-                   enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+               if (status->flag & RX_FLAG_ICV_STRIPPED)
                        skb_trim(msdu, msdu->len -
-                                ath10k_htt_rx_crypto_tail_len(ar, enctype));
+                                ath10k_htt_rx_crypto_icv_len(ar, enctype));
        }
 
        /* MMIC */
        if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
-               skb_trim(msdu, msdu->len - 8);
+               skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
 
        /* Head */
        if (status->flag & RX_FLAG_IV_STRIPPED) {
index dfb26f03c1a27a48ad97e681310b3d41a9bab129..1b05b5d7a03867ecc299f9941dbab3466159e9c9 100644 (file)
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
                if (!avp->assoc)
                        return false;
 
-               skb = ieee80211_nullfunc_get(sc->hw, vif);
+               skb = ieee80211_nullfunc_get(sc->hw, vif, false);
                if (!skb)
                        return false;
 
index 71812a2dd513d3192a4ec2d2243da6726d0c5e5e..f7d228b5ba933f744474be119802f41e461c5715 100644 (file)
@@ -1233,7 +1233,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
        }
 
        /* External RF module */
-       iris_node = of_find_node_by_name(mmio_node, "iris");
+       iris_node = of_get_child_by_name(mmio_node, "iris");
        if (iris_node) {
                if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
                        wcn->rf_id = RF_IRIS_WCN3620;
index ede89d4ffc8824cbf8e43dc0e78e227f02bb1539..e99e766a302851e36b34f338cd819b6eaf616637 100644 (file)
@@ -518,11 +518,11 @@ exit:
 
 /* LED trigger */
 static int tx_activity;
-static void at76_ledtrig_tx_timerfunc(unsigned long data);
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused);
 static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc);
 DEFINE_LED_TRIGGER(ledtrig_tx);
 
-static void at76_ledtrig_tx_timerfunc(unsigned long data)
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused)
 {
        static int tx_lastactivity;
 
index 3559fb5b8fb03a964461be8815f12a4dad3cba3a..03aae6bc18388a529f3a64cd5501ac8d5ed38deb 100644 (file)
@@ -280,9 +280,9 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
 /**
  * brcmf_btcoex_timerfunc() - BT coex timer callback
  */
-static void brcmf_btcoex_timerfunc(ulong data)
+static void brcmf_btcoex_timerfunc(struct timer_list *t)
 {
-       struct brcmf_btcoex_info *bt_local = (struct brcmf_btcoex_info *)data;
+       struct brcmf_btcoex_info *bt_local = from_timer(bt_local, t, timer);
        brcmf_dbg(TRACE, "enter\n");
 
        bt_local->timer_on = false;
@@ -380,7 +380,7 @@ int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
        /* Set up timer for BT  */
        btci->timer_on = false;
        btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
-       setup_timer(&btci->timer, brcmf_btcoex_timerfunc, (ulong)btci);
+       timer_setup(&btci->timer, brcmf_btcoex_timerfunc, 0);
        btci->cfg = cfg;
        btci->saved_regs_part1 = false;
        btci->saved_regs_part2 = false;
index 6e70df97815944cb3868c7bed7b68689f695cbc8..15fa00d79fc66bb7eb7d7c770c6980ee45333355 100644 (file)
@@ -2983,10 +2983,10 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
        brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
 }
 
-static void brcmf_escan_timeout(unsigned long data)
+static void brcmf_escan_timeout(struct timer_list *t)
 {
        struct brcmf_cfg80211_info *cfg =
-                       (struct brcmf_cfg80211_info *)data;
+                       from_timer(cfg, t, escan_timeout);
 
        if (cfg->int_escan_map || cfg->scan_request) {
                brcmf_err("timer expired\n");
@@ -3150,8 +3150,7 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
                            brcmf_cfg80211_escan_handler);
        cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
        /* Init scan_timeout timer */
-       setup_timer(&cfg->escan_timeout, brcmf_escan_timeout,
-                   (unsigned long)cfg);
+       timer_setup(&cfg->escan_timeout, brcmf_escan_timeout, 0);
        INIT_WORK(&cfg->escan_timeout_work,
                  brcmf_cfg80211_escan_timeout_worker);
 }
index e3495ea95553fb2d2056421d5ff0845b8f26e83a..310c4e2746aab1da08d62e58812219b5271507e9 100644 (file)
@@ -3972,9 +3972,9 @@ brcmf_sdio_watchdog_thread(void *data)
 }
 
 static void
-brcmf_sdio_watchdog(unsigned long data)
+brcmf_sdio_watchdog(struct timer_list *t)
 {
-       struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+       struct brcmf_sdio *bus = from_timer(bus, t, timer);
 
        if (bus->watchdog_tsk) {
                complete(&bus->watchdog_wait);
@@ -4169,8 +4169,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        init_waitqueue_head(&bus->dcmd_resp_wait);
 
        /* Set up the watchdog timer */
-       setup_timer(&bus->timer, brcmf_sdio_watchdog,
-                   (unsigned long)bus);
+       timer_setup(&bus->timer, brcmf_sdio_watchdog, 0);
        /* Initialize watchdog thread */
        init_completion(&bus->watchdog_wait);
        bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
index af7c4f36b66f9fcd133105d4430009b3e97d8aa0..e7e75b4580052f4591365b4eac580f809324282b 100644 (file)
 #define IWL9000_SMEM_OFFSET            0x400000
 #define IWL9000_SMEM_LEN               0x68000
 
-#define  IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define  IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define  IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
 #define  IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
 #define  IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
 #define  IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
-#define IWL9000_MODULE_FIRMWARE(api) \
-       IWL9000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000A_MODULE_FIRMWARE(api) \
+       IWL9000A_FW_PRE __stringify(api) ".ucode"
+#define IWL9000B_MODULE_FIRMWARE(api) \
+       IWL9000B_FW_PRE __stringify(api) ".ucode"
 #define IWL9000RFB_MODULE_FIRMWARE(api) \
-       IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
+       IWL9000RFB_FW_PRE __stringify(api) ".ucode"
 #define IWL9260A_MODULE_FIRMWARE(api) \
-       IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+       IWL9260A_FW_PRE __stringify(api) ".ucode"
 #define IWL9260B_MODULE_FIRMWARE(api) \
-       IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
+       IWL9260B_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_9000         10
 
@@ -194,7 +197,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
        .nvm_ver = IWL9000_NVM_VERSION,
        .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
        .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg_soc = {
+       .name = "Intel(R) Dual Band Wireless AC 9460",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
        .integrated = true,
+       .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_soc = {
+               .name = "Intel(R) Dual Band Wireless AC 9461",
+               .fw_name_pre = IWL9000A_FW_PRE,
+               .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+               .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+               IWL_DEVICE_9000,
+               .ht_params = &iwl9000_ht_params,
+               .nvm_ver = IWL9000_NVM_VERSION,
+               .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+               .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+               .integrated = true,
+               .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_soc = {
+               .name = "Intel(R) Dual Band Wireless AC 9462",
+               .fw_name_pre = IWL9000A_FW_PRE,
+               .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+               .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+               IWL_DEVICE_9000,
+               .ht_params = &iwl9000_ht_params,
+               .nvm_ver = IWL9000_NVM_VERSION,
+               .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+               .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+               .integrated = true,
+               .soc_latency = 5000,
 };
 
 const struct iwl_cfg iwl9560_2ac_cfg = {
@@ -206,10 +250,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
        .nvm_ver = IWL9000_NVM_VERSION,
        .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
        .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-       .integrated = true,
 };
 
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+const struct iwl_cfg iwl9560_2ac_cfg_soc = {
+       .name = "Intel(R) Dual Band Wireless AC 9560",
+       .fw_name_pre = IWL9000A_FW_PRE,
+       .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+       .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
+       .soc_latency = 5000,
+};
+MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
index ea82065151716ed5bc60ecc29208481f378e4ff9..705f83b02e136f8b3d42501728691fe702db5d19 100644 (file)
 #define IWL_A000_HR_A0_FW_PRE  "iwlwifi-QuQnj-a0-hr-a0-"
 
 #define IWL_A000_HR_MODULE_FIRMWARE(api) \
-       IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
+       IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
 #define IWL_A000_JF_MODULE_FIRMWARE(api) \
-       IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
+       IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
 #define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
-       IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
+       IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
 #define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
-       IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
+       IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
 #define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
-       IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
+       IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_A000         10
 
index 2acd94da9efeb48a65304a43032d3be8fd684660..d11d72615de220f2e776f0b0762c8cc7806212d0 100644 (file)
@@ -399,9 +399,9 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
  * was received.  We need to ensure we receive the statistics in order
  * to update the temperature used for calibrating the TXPOWER.
  */
-static void iwl_bg_statistics_periodic(unsigned long data)
+static void iwl_bg_statistics_periodic(struct timer_list *t)
 {
-       struct iwl_priv *priv = (struct iwl_priv *)data;
+       struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
@@ -556,9 +556,9 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
  * this function is to perform continuous uCode event logging operation
  * if enabled
  */
-static void iwl_bg_ucode_trace(unsigned long data)
+static void iwl_bg_ucode_trace(struct timer_list *t)
 {
-       struct iwl_priv *priv = (struct iwl_priv *)data;
+       struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
@@ -1085,11 +1085,9 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
        if (priv->lib->bt_params)
                iwlagn_bt_setup_deferred_work(priv);
 
-       setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
-                   (unsigned long)priv);
+       timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
 
-       setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
-                   (unsigned long)priv);
+       timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
 }
 
 void iwl_cancel_deferred_work(struct iwl_priv *priv)
index 5b73492e7ff71d9041ed12c613069f5925558821..6524533d723c5a48d8bf68bcb3fabf9daa4c9ab9 100644 (file)
@@ -164,9 +164,10 @@ enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
  * without doing anything, driver should continue the 5 seconds timer
  * to wake up uCode for temperature check until temperature drop below CT
  */
-static void iwl_tt_check_exit_ct_kill(unsigned long data)
+static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
 {
-       struct iwl_priv *priv = (struct iwl_priv *)data;
+       struct iwl_priv *priv = from_timer(priv, t,
+                                          thermal_throttle.ct_kill_exit_tm);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        unsigned long flags;
 
@@ -214,9 +215,10 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
        }
 }
 
-static void iwl_tt_ready_for_ct_kill(unsigned long data)
+static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
 {
-       struct iwl_priv *priv = (struct iwl_priv *)data;
+       struct iwl_priv *priv = from_timer(priv, t,
+                                          thermal_throttle.ct_kill_waiting_tm);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -612,10 +614,10 @@ void iwl_tt_initialize(struct iwl_priv *priv)
        memset(tt, 0, sizeof(struct iwl_tt_mgmt));
 
        tt->state = IWL_TI_0;
-       setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
-                   iwl_tt_check_exit_ct_kill, (unsigned long)priv);
-       setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
-                   iwl_tt_ready_for_ct_kill, (unsigned long)priv);
+       timer_setup(&priv->thermal_throttle.ct_kill_exit_tm,
+                   iwl_tt_check_exit_ct_kill, 0);
+       timer_setup(&priv->thermal_throttle.ct_kill_waiting_tm,
+                   iwl_tt_ready_for_ct_kill, 0);
        /* setup deferred ct kill work */
        INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
        INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
index 5a40092febfb611c8dc06d9b13288027f453f83e..3bfc657f6b4214da88f63a8bab326855dea2afca 100644 (file)
@@ -531,6 +531,8 @@ struct iwl_scan_config_v1 {
 } __packed; /* SCAN_CONFIG_DB_CMD_API_S */
 
 #define SCAN_TWO_LMACS 2
+#define SCAN_LB_LMAC_IDX 0
+#define SCAN_HB_LMAC_IDX 1
 
 struct iwl_scan_config {
        __le32 flags;
@@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags {
        IWL_UMAC_SCAN_GEN_FLAGS_MATCH                   = BIT(9),
        IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL          = BIT(10),
        IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED        = BIT(11),
+       IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL          = BIT(13),
 };
 
 /**
@@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail {
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @ooc_priority: out of channel priority - &enum iwl_scan_priority
  * @general_flags: &enum iwl_umac_scan_general_flags
- * @reserved2: for future use and alignment
  * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
  * @extended_dwell: dwell time for channels 1, 6 and 11
  * @active_dwell: dwell time for active scan
  * @passive_dwell: dwell time for passive scan
  * @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ *     per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ *     number of APs per social (1,6,11) channel
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ *     to total scan time
  * @max_out_time: max out of serving channel time, per LMAC - for CDB there
  *     are 2 LMACs
  * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
@@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail {
  * @channel_flags: &enum iwl_scan_channel_flags
  * @n_channels: num of channels in scan request
  * @reserved: for future use and alignment
+ * @reserved2: for future use and alignment
+ * @reserved3: for future use and alignment
  * @data: &struct iwl_scan_channel_cfg_umac and
  *     &struct iwl_scan_req_umac_tail
  */
@@ -651,41 +661,64 @@ struct iwl_scan_req_umac {
        __le32 flags;
        __le32 uid;
        __le32 ooc_priority;
-       /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
        __le16 general_flags;
-       u8 reserved2;
+       u8 reserved;
        u8 scan_start_mac_id;
-       u8 extended_dwell;
-       u8 active_dwell;
-       u8 passive_dwell;
-       u8 fragmented_dwell;
        union {
                struct {
+                       u8 extended_dwell;
+                       u8 active_dwell;
+                       u8 passive_dwell;
+                       u8 fragmented_dwell;
                        __le32 max_out_time;
                        __le32 suspend_time;
                        __le32 scan_priority;
-                       /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+                       /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
                        u8 channel_flags;
                        u8 n_channels;
-                       __le16 reserved;
+                       __le16 reserved2;
                        u8 data[];
                } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
                struct {
+                       u8 extended_dwell;
+                       u8 active_dwell;
+                       u8 passive_dwell;
+                       u8 fragmented_dwell;
                        __le32 max_out_time[SCAN_TWO_LMACS];
                        __le32 suspend_time[SCAN_TWO_LMACS];
                        __le32 scan_priority;
-                       /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+                       /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
                        u8 channel_flags;
                        u8 n_channels;
-                       __le16 reserved;
+                       __le16 reserved2;
                        u8 data[];
                } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+               struct {
+                       u8 active_dwell;
+                       u8 passive_dwell;
+                       u8 fragmented_dwell;
+                       u8 adwell_default_n_aps;
+                       u8 adwell_default_n_aps_social;
+                       u8 reserved3;
+                       __le16 adwell_max_budget;
+                       __le32 max_out_time[SCAN_TWO_LMACS];
+                       __le32 suspend_time[SCAN_TWO_LMACS];
+                       __le32 scan_priority;
+                       /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+                       u8 channel_flags;
+                       u8 n_channels;
+                       __le16 reserved2;
+                       u8 data[];
+               } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
        };
 } __packed;
 
-#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
+                                  2 * sizeof(u8) - sizeof(__le16))
 #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
-                                  2 * sizeof(__le32))
+                                  2 * sizeof(__le32) - 2 * sizeof(u8) - \
+                                  sizeof(__le16))
 
 /**
  * struct iwl_umac_scan_abort
index 740d97093d1c5c8992b5f09f4274b445639de20b..37a5c5b4eda6ca48eb24c6978c8aeba640e1886d 100644 (file)
@@ -264,6 +264,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_STA_TYPE              = (__force iwl_ucode_tlv_api_t)30,
        IWL_UCODE_TLV_API_NAN2_VER2             = (__force iwl_ucode_tlv_api_t)31,
        /* API Set 1 */
+       IWL_UCODE_TLV_API_ADAPTIVE_DWELL        = (__force iwl_ucode_tlv_api_t)32,
        IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE   = (__force iwl_ucode_tlv_api_t)34,
        IWL_UCODE_TLV_API_NEW_RX_STATS          = (__force iwl_ucode_tlv_api_t)35,
        IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL     = (__force iwl_ucode_tlv_api_t)37,
index d1263a554420403d82d62c699f8e404c7f687757..e21e46cf6f9a3ebad9926826f2168c2434ed4fd4 100644 (file)
@@ -366,6 +366,7 @@ struct iwl_cfg {
        u32 dccm2_len;
        u32 smem_offset;
        u32 smem_len;
+       u32 soc_latency;
        u16 nvm_ver;
        u16 nvm_calib_ver;
        u16 rx_with_siso_diversity:1,
@@ -472,6 +473,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
 extern const struct iwl_cfg iwl9270_2ac_cfg;
 extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
 extern const struct iwl_cfg iwla000_2ac_cfg_hr;
 extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwla000_2ac_cfg_jf;
index 0e18c5066f04e2102114ebff9e433984fe880f60..4575595ab022600ff7d33da2d789c54f84a0e951 100644 (file)
@@ -1142,6 +1142,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
                            IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_api(&mvm->fw->ucode_capa,
+                         IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
 static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
 {
        /* For now we only use this mode to differentiate between
index 774122fed454fbb4d1d109a18a53634baf6f83e2..e4fd476e9ccb0888d572ee296a4947c9fd3903ca 100644 (file)
@@ -130,6 +130,19 @@ struct iwl_mvm_scan_params {
        u32 measurement_dwell;
 };
 
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+       struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+       if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+               return (void *)&cmd->v7.data;
+
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return (void *)&cmd->v6.data;
+
+       return (void *)&cmd->v1.data;
+}
+
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
 {
        if (mvm->scan_rx_ant != ANT_NONE)
@@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
 
+       if (iwl_mvm_is_regular_scan(params))
+               cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+       else
+               cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+
+       if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+               if (params->measurement_dwell) {
+                       cmd->v7.active_dwell = params->measurement_dwell;
+                       cmd->v7.passive_dwell = params->measurement_dwell;
+               } else {
+                       cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+                       cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+               }
+               cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+               cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+               cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+                       cpu_to_le32(timing->max_out_time);
+               cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+                       cpu_to_le32(timing->suspend_time);
+               if (iwl_mvm_is_cdb_supported(mvm)) {
+                       cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+                               cpu_to_le32(timing->max_out_time);
+                       cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+                               cpu_to_le32(timing->suspend_time);
+               }
+
+               return;
+       }
+
        if (params->measurement_dwell) {
-               cmd->active_dwell = params->measurement_dwell;
-               cmd->passive_dwell = params->measurement_dwell;
-               cmd->extended_dwell = params->measurement_dwell;
+               cmd->v1.active_dwell = params->measurement_dwell;
+               cmd->v1.passive_dwell = params->measurement_dwell;
+               cmd->v1.extended_dwell = params->measurement_dwell;
        } else {
-               cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
-               cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
-               cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+               cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+               cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+               cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
        }
-       cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+       cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
 
        if (iwl_mvm_has_new_tx_api(mvm)) {
                cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
-               cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
-               cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+               cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+                       cpu_to_le32(timing->max_out_time);
+               cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+                       cpu_to_le32(timing->suspend_time);
                if (iwl_mvm_is_cdb_supported(mvm)) {
-                       cmd->v6.max_out_time[1] =
+                       cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
                                cpu_to_le32(timing->max_out_time);
-                       cmd->v6.suspend_time[1] =
+                       cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
                                cpu_to_le32(timing->suspend_time);
                }
        } else {
@@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                cmd->v1.scan_priority =
                        cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
        }
-
-       if (iwl_mvm_is_regular_scan(params))
-               cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
-       else
-               cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
 }
 
 static void
@@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             int type)
 {
        struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
-       void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
-                        (void *)&cmd->v6.data : (void *)&cmd->v1.data;
+       void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
        struct iwl_scan_req_umac_tail *sec_part = cmd_data +
                sizeof(struct iwl_scan_channel_cfg_umac) *
                        mvm->fw->ucode_capa.n_scan_channels;
@@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
                                IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
 
-       if (iwl_mvm_has_new_tx_api(mvm)) {
+       if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+               cmd->v7.channel_flags = channel_flags;
+               cmd->v7.n_channels = params->n_channels;
+       } else if (iwl_mvm_has_new_tx_api(mvm)) {
                cmd->v6.channel_flags = channel_flags;
                cmd->v6.n_channels = params->n_channels;
        } else {
@@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
 {
        int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
 
-       if (iwl_mvm_has_new_tx_api(mvm))
-               base_size = IWL_SCAN_REQ_UMAC_SIZE;
+       if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+               base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+       else if (iwl_mvm_has_new_tx_api(mvm))
+               base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
                return base_size +
index 4a21c12276d7576093acb331d7da2cc4d466e9a8..f21fe59faccff835efe0db590c437f419a89ccb4 100644 (file)
@@ -535,47 +535,121 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
 
 /* a000 Series */
        {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
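The additions above extend the iwlwifi PCI ID table: each IWL_PCI_DEVICE() entry ties an Intel device/subsystem ID pair to a configuration struct (the new *_soc variants cover the integrated-SoC parts), and PCI core matching selects the right config at probe time. As a hedged illustration only — the exact macro expansion lives in the driver's headers, and the subvendor wildcard below is an assumption — one entry corresponds to a struct pci_device_id roughly like:

	/* Sketch of what IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg) amounts to;
	 * fields other than device/subdevice/driver_data are assumptions. */
	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x2720,
	  .subvendor = PCI_ANY_ID, .subdevice = 0x0034,
	  .driver_data = (kernel_ulong_t)&iwl9560_2ac_cfg },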
index b5c459cd70cecab890111b302f09b5e5a55c9e29..fed6d842a5e1dc444fe58ad203cb8fb5e3ab1c6f 100644 (file)
@@ -147,9 +147,9 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
        memset(ptr, 0, sizeof(*ptr));
 }
 
-static void iwl_pcie_txq_stuck_timer(unsigned long data)
+static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
 {
-       struct iwl_txq *txq = (void *)data;
+       struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
        struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
 
@@ -495,8 +495,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
        if (WARN_ON(txq->entries || txq->tfds))
                return -EINVAL;
 
-       setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
-                   (unsigned long)txq);
+       timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
        txq->trans_pcie = trans_pcie;
 
        txq->n_window = slots_num;
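This hunk, and the similar ones in the hostap, orinoco, wlcore, xen-netfront and NFC drivers further down, are part of the tree-wide move from the setup_timer()/unsigned long callback convention to the timer_list-based API: the callback now receives the timer pointer itself and recovers its container with from_timer(), so the .data cookie disappears. A minimal sketch of the pattern with made-up names (not code from this commit):

	#include <linux/timer.h>

	struct foo {
		struct timer_list timer;		/* embedded timer, no .data cookie */
	};

	static void foo_timeout(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer);	/* container_of() on the timer field */
		/* ... handle the timeout using f ... */
	}

	static void foo_init(struct foo *f)
	{
		timer_setup(&f->timer, foo_timeout, 0);		/* replaces init_timer()/setup_timer() */
		mod_timer(&f->timer, jiffies + HZ);		/* arming is unchanged */
	}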
index 1a8d8db80b05405de0a2f1a7160c56cb2fd0fdc3..b4dfe1893d18445373e7b812931d376ebc17c65f 100644 (file)
@@ -185,9 +185,9 @@ static void hostap_event_expired_sta(struct net_device *dev,
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
 
-static void ap_handle_timer(unsigned long data)
+static void ap_handle_timer(struct timer_list *t)
 {
-       struct sta_info *sta = (struct sta_info *) data;
+       struct sta_info *sta = from_timer(sta, t, timer);
        local_info_t *local;
        struct ap_data *ap;
        unsigned long next_time = 0;
@@ -1189,10 +1189,8 @@ static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
        }
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-       init_timer(&sta->timer);
+       timer_setup(&sta->timer, ap_handle_timer, 0);
        sta->timer.expires = jiffies + ap->max_inactivity;
-       sta->timer.data = (unsigned long) sta;
-       sta->timer.function = ap_handle_timer;
        if (!ap->local->hostapd)
                add_timer(&sta->timer);
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
index 72b46eaf3de21ce1e6b6b43064f4488a61e2c4f5..5c4a17a18968bf0b0f0235b42fa577de6f9ed9d4 100644 (file)
@@ -2794,9 +2794,9 @@ static void prism2_check_sta_fw_version(local_info_t *local)
 }
 
 
-static void hostap_passive_scan(unsigned long data)
+static void hostap_passive_scan(struct timer_list *t)
 {
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = from_timer(local, t, passive_scan_timer);
        struct net_device *dev = local->dev;
        u16 chan;
 
@@ -2869,10 +2869,10 @@ static void handle_comms_qual_update(struct work_struct *work)
  * used to monitor that local->last_tick_timer is being updated. If not,
  * an interrupt busy-loop is assumed and the driver tries to recover by masking out
  * some events. */
-static void hostap_tick_timer(unsigned long data)
+static void hostap_tick_timer(struct timer_list *t)
 {
        static unsigned long last_inquire = 0;
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = from_timer(local, t, tick_timer);
        local->last_tick_timer = jiffies;
 
        /* Inquire CommTallies every 10 seconds to keep the statistics updated
@@ -3225,13 +3225,8 @@ while (0)
 
        lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
 
-       init_timer(&local->passive_scan_timer);
-       local->passive_scan_timer.data = (unsigned long) local;
-       local->passive_scan_timer.function = hostap_passive_scan;
-
-       init_timer(&local->tick_timer);
-       local->tick_timer.data = (unsigned long) local;
-       local->tick_timer.function = hostap_tick_timer;
+       timer_setup(&local->passive_scan_timer, hostap_passive_scan, 0);
+       timer_setup(&local->tick_timer, hostap_tick_timer, 0);
        local->tick_timer.expires = jiffies + 2 * HZ;
        add_timer(&local->tick_timer);
 
index 501180584b4b197b84aa486a85cb1e8b993d0e80..94ad6fe29e69bdf199157e78023e15595e22daa9 100644 (file)
@@ -319,9 +319,9 @@ static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
        mod_timer(timer, expire);
 }
 
-static void ezusb_request_timerfn(u_long _ctx)
+static void ezusb_request_timerfn(struct timer_list *t)
 {
-       struct request_context *ctx = (void *) _ctx;
+       struct request_context *ctx = from_timer(ctx, t, timer);
 
        ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
        if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
@@ -365,7 +365,7 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
        refcount_set(&ctx->refcount, 1);
        init_completion(&ctx->done);
 
-       setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
+       timer_setup(&ctx->timer, ezusb_request_timerfn, 0);
        return ctx;
 }
 
index 07a49f58070aa061b763c8723bab99d335a9fab7..10b075a46b266218c53d1e5674c1789e1e0f3d80 100644 (file)
@@ -2805,7 +2805,7 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
                return -EMSGSIZE;
 
        if (cb)
-               genl_dump_check_consistent(cb, hdr, &hwsim_genl_family);
+               genl_dump_check_consistent(cb, hdr);
 
        if (data->alpha2[0] && data->alpha2[1])
                param.reg_alpha2 = data->alpha2;
@@ -3108,6 +3108,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
 {
        struct hwsim_new_radio_params param = { 0 };
        const char *hwname = NULL;
+       int ret;
 
        param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
        param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3147,7 +3148,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                param.regd = hwsim_world_regdom_custom[idx];
        }
 
-       return mac80211_hwsim_new_radio(info, &param);
+       ret = mac80211_hwsim_new_radio(info, &param);
+       kfree(hwname);
+       return ret;
 }
 
 static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
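The hwsim change fixes a memory leak in the new-radio netlink path: hwname, duplicated from the netlink attribute, was previously abandoned by the direct return, so the result is now captured and the string freed on every exit. The general shape, with placeholder names (user_name/create_radio() are illustrative, not from this commit):

	char *name = kstrdup(user_name, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = create_radio(info, name);	/* consumer copies what it needs */
	kfree(name);			/* freed on success as well as on error */
	return ret;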
index 7d6dc76c930ad32265f00b00076cabe6a75bf3b6..6711e7fb69269c325a18328138c9e310d4a19cf4 100644 (file)
@@ -554,7 +554,7 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
                return -EFAULT;
        }
 
-       mac->scan_timeout.function = (TIMER_FUNC_TYPE)qtnf_scan_timeout;
+       mac->scan_timeout.function = qtnf_scan_timeout;
        mod_timer(&mac->scan_timeout,
                  jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
 
index 2d2c1ea65cb26440dc81efb2ae0e2c6851d8d6d6..3423dc51198b574cd863c72685c9d220a997f258 100644 (file)
@@ -288,7 +288,7 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
                mac->iflist[i].vifid = i;
                qtnf_sta_list_init(&mac->iflist[i].sta_list);
                mutex_init(&mac->mac_lock);
-               setup_timer(&mac->scan_timeout, NULL, 0);
+               timer_setup(&mac->scan_timeout, NULL, 0);
        }
 
        qtnf_mac_init_primary_intf(mac);
index d8afcdfca1ed6206dca11b8e86e8ba091b705659..0133fcd4601b2410b0d9b3b666f216b1860e3e37 100644 (file)
@@ -569,7 +569,7 @@ static int dl_startup_params(struct net_device *dev)
        local->card_status = CARD_DL_PARAM;
        /* Start kernel timer to wait for dl startup to complete. */
        local->timer.expires = jiffies + HZ / 2;
-       local->timer.function = (TIMER_FUNC_TYPE)verify_dl_startup;
+       local->timer.function = verify_dl_startup;
        add_timer(&local->timer);
        dev_dbg(&link->dev,
              "ray_cs dl_startup_params started timer for verify_dl_startup\n");
@@ -1947,12 +1947,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
                                        dev_dbg(&link->dev,
                                              "ray_cs interrupt network \"%s\" start failed\n",
                                              memtmp);
-                                       local->timer.function = (TIMER_FUNC_TYPE)start_net;
+                                       local->timer.function = start_net;
                                } else {
                                        dev_dbg(&link->dev,
                                              "ray_cs interrupt network \"%s\" join failed\n",
                                              memtmp);
-                                       local->timer.function = (TIMER_FUNC_TYPE)join_net;
+                                       local->timer.function = join_net;
                                }
                                add_timer(&local->timer);
                        }
@@ -2417,9 +2417,9 @@ static void authenticate(ray_dev_t *local)
 
        del_timer(&local->timer);
        if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
-               local->timer.function = (TIMER_FUNC_TYPE)join_net;
+               local->timer.function = join_net;
        } else {
-               local->timer.function = (TIMER_FUNC_TYPE)authenticate_timeout;
+               local->timer.function = authenticate_timeout;
        }
        local->timer.expires = jiffies + HZ * 2;
        add_timer(&local->timer);
@@ -2502,7 +2502,7 @@ static void associate(ray_dev_t *local)
 
                del_timer(&local->timer);
                local->timer.expires = jiffies + HZ * 2;
-               local->timer.function = (TIMER_FUNC_TYPE)join_net;
+               local->timer.function = join_net;
                add_timer(&local->timer);
                local->card_status = CARD_ASSOC_FAILED;
                return;
index 08730227cd18f41c66d94bce296ea2bbf21befb0..8f84438333482c8c51ee32648f7aa37be5f5dc06 100644 (file)
@@ -162,13 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
        u8 *buf;
        int status = -ENOMEM;
 
+       if (len > RSI_USB_CTRL_BUF_SIZE)
+               return -EINVAL;
+
        buf  = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
        if (!buf)
                return status;
 
-       if (len > RSI_USB_CTRL_BUF_SIZE)
-               return -EINVAL;
-
        status = usb_control_msg(usbdev,
                                 usb_rcvctrlpipe(usbdev, 0),
                                 USB_VENDOR_REGISTER_READ,
@@ -207,13 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
        u8 *usb_reg_buf;
        int status = -ENOMEM;
 
+       if (len > RSI_USB_CTRL_BUF_SIZE)
+               return -EINVAL;
+
        usb_reg_buf  = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
        if (!usb_reg_buf)
                return status;
 
-       if (len > RSI_USB_CTRL_BUF_SIZE)
-               return -EINVAL;
-
        usb_reg_buf[0] = (value & 0x00ff);
        usb_reg_buf[1] = (value & 0xff00) >> 8;
        usb_reg_buf[2] = 0x0;
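In both rsi_usb_reg_read() and rsi_usb_reg_write() the length check is hoisted above the allocation, so an oversized request now fails before kmalloc() runs rather than returning -EINVAL and leaking the freshly allocated buffer. The check-before-allocate shape used here:

	if (len > RSI_USB_CTRL_BUF_SIZE)
		return -EINVAL;			/* reject early: nothing allocated yet */

	buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... use buf; kfree(buf) on the remaining exit paths ... */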
index 03687a80d6e983e4fd0b0a60db8c21695f595756..38678e9a05621e58644b6ed7f288f40af9f099b6 100644 (file)
@@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
 
                priv->bss_loss_state++;
 
-               skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+               skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
                WARN_ON(!skb);
                if (skb)
                        cw1200_tx(priv->hw, NULL, skb);
@@ -2265,7 +2265,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
                .rate = 0xFF,
        };
 
-       frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+       frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
        if (!frame.skb)
                return -ENOMEM;
 
index 9915d83a4a30550816fafbc741471e00fb694942..6d02c660b4ab785db914889c9819691c84b9a372 100644 (file)
@@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
                size = sizeof(struct wl12xx_null_data_template);
                ptr = NULL;
        } else {
-               skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+               skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
                if (!skb)
                        goto out;
                size = skb->len;
index 2bfc12fdc9292be77622165d4777b7ab24346272..761cf8573a805e272121fa05bf129f1ee600a10a 100644 (file)
@@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                ptr = NULL;
        } else {
                skb = ieee80211_nullfunc_get(wl->hw,
-                                            wl12xx_wlvif_to_vif(wlvif));
+                                            wl12xx_wlvif_to_vif(wlvif),
+                                            false);
                if (!skb)
                        goto out;
                size = skb->len;
@@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
        struct sk_buff *skb = NULL;
        int ret = -ENOMEM;
 
-       skb = ieee80211_nullfunc_get(wl->hw, vif);
+       skb = ieee80211_nullfunc_get(wl->hw, vif, false);
        if (!skb)
                goto out;
 
index c346c021b99939f715c80250580d6f1775eb8953..d47921a845098815652a46f7fc16cdf8bccf3488 100644 (file)
@@ -196,9 +196,9 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static void wl1271_rx_streaming_timer(unsigned long data)
+static void wl1271_rx_streaming_timer(struct timer_list *t)
 {
-       struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+       struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
        struct wl1271 *wl = wlvif->wl;
        ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
 }
@@ -2279,8 +2279,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                          wlcore_pending_auth_complete_work);
        INIT_LIST_HEAD(&wlvif->list);
 
-       setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
-                   (unsigned long) wlvif);
+       timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
        return 0;
 }
 
index 8b8689c6d8877863dddad9f9c10215d311f5fa6f..c5a34671abdaf78a1257b869af817ecd148a6e0c 100644 (file)
@@ -87,6 +87,8 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
+static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+
 struct netfront_stats {
        u64                     packets;
        u64                     bytes;
@@ -228,9 +230,9 @@ static bool xennet_can_sg(struct net_device *dev)
 }
 
 
-static void rx_refill_timeout(unsigned long data)
+static void rx_refill_timeout(struct timer_list *t)
 {
-       struct netfront_queue *queue = (struct netfront_queue *)data;
+       struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
        napi_schedule(&queue->napi);
 }
 
@@ -1605,8 +1607,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->rx_lock);
 
-       setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
-                   (unsigned long)queue);
+       timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 
        snprintf(queue->name, sizeof(queue->name), "%s-q%u",
                 queue->info->netdev->name, queue->id);
@@ -2021,10 +2022,12 @@ static void netback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosed:
+               wake_up_all(&module_unload_q);
                if (dev->state == XenbusStateClosed)
                        break;
                /* Missed the backend's CLOSING state -- fallthrough */
        case XenbusStateClosing:
+               wake_up_all(&module_unload_q);
                xenbus_frontend_closed(dev);
                break;
        }
@@ -2130,6 +2133,20 @@ static int xennet_remove(struct xenbus_device *dev)
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
+       if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+               xenbus_switch_state(dev, XenbusStateClosing);
+               wait_event(module_unload_q,
+                          xenbus_read_driver_state(dev->otherend) ==
+                          XenbusStateClosing);
+
+               xenbus_switch_state(dev, XenbusStateClosed);
+               wait_event(module_unload_q,
+                          xenbus_read_driver_state(dev->otherend) ==
+                          XenbusStateClosed ||
+                          xenbus_read_driver_state(dev->otherend) ==
+                          XenbusStateUnknown);
+       }
+
        xennet_disconnect_backend(info);
 
        unregister_netdev(info->netdev);
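xennet_remove() now performs an explicit unload handshake: it switches the frontend to Closing and then Closed, sleeping on the new module_unload_q until netback_changed() sees the backend reach the matching state (or Unknown) and wakes it, so teardown only proceeds once the backend has let go. Condensed from the hunks above:

	xenbus_switch_state(dev, XenbusStateClosing);
	wait_event(module_unload_q,
		   xenbus_read_driver_state(dev->otherend) == XenbusStateClosing);

	xenbus_switch_state(dev, XenbusStateClosed);
	wait_event(module_unload_q,
		   xenbus_read_driver_state(dev->otherend) == XenbusStateClosed ||
		   xenbus_read_driver_state(dev->otherend) == XenbusStateUnknown);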
index 7f8960a46aab0a6a494458f2c0a89cca1716e26d..52c8ae504e328ad6f44f05db2d041b5775a27d04 100644 (file)
@@ -130,9 +130,9 @@ static void fw_dnld_over(struct nfcmrvl_private *priv, u32 error)
        nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error);
 }
 
-static void fw_dnld_timeout(unsigned long arg)
+static void fw_dnld_timeout(struct timer_list *t)
 {
-       struct nfcmrvl_private *priv = (struct nfcmrvl_private *) arg;
+       struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer);
 
        nfc_err(priv->dev, "FW loading timeout");
        priv->fw_dnld.state = STATE_RESET;
@@ -538,8 +538,7 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name)
        }
 
        /* Configure a timer for timeout */
-       setup_timer(&priv->fw_dnld.timer, fw_dnld_timeout,
-                   (unsigned long) priv);
+       timer_setup(&priv->fw_dnld.timer, fw_dnld_timeout, 0);
        mod_timer(&priv->fw_dnld.timer,
                  jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT));
 
index c05cb637ba9234371b24d4f7ede54ff7b4a6f0c3..a0cc1cc452927b54e84d749d053fe99e63463ca2 100644 (file)
@@ -1232,9 +1232,9 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
        return 0;
 }
 
-static void pn533_listen_mode_timer(unsigned long data)
+static void pn533_listen_mode_timer(struct timer_list *t)
 {
-       struct pn533 *dev = (struct pn533 *)data;
+       struct pn533 *dev = from_timer(dev, t, listen_timer);
 
        dev_dbg(dev->dev, "Listen mode timeout\n");
 
@@ -2632,9 +2632,7 @@ struct pn533 *pn533_register_device(u32 device_type,
        if (priv->wq == NULL)
                goto error;
 
-       init_timer(&priv->listen_timer);
-       priv->listen_timer.data = (unsigned long) priv;
-       priv->listen_timer.function = pn533_listen_mode_timer;
+       timer_setup(&priv->listen_timer, pn533_listen_mode_timer, 0);
 
        skb_queue_head_init(&priv->resp_q);
        skb_queue_head_init(&priv->fragment_skb);
index 9477994cf97534fe38b6b1e70b603c932081de6e..f26d938d240f03dbb27889fc101d552511ea9d92 100644 (file)
@@ -246,18 +246,18 @@ void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ndlc_recv);
 
-static void ndlc_t1_timeout(unsigned long data)
+static void ndlc_t1_timeout(struct timer_list *t)
 {
-       struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+       struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
 
        pr_debug("\n");
 
        schedule_work(&ndlc->sm_work);
 }
 
-static void ndlc_t2_timeout(unsigned long data)
+static void ndlc_t2_timeout(struct timer_list *t)
 {
-       struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+       struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
 
        pr_debug("\n");
 
@@ -282,13 +282,8 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
        *ndlc_id = ndlc;
 
        /* initialize timers */
-       init_timer(&ndlc->t1_timer);
-       ndlc->t1_timer.data = (unsigned long)ndlc;
-       ndlc->t1_timer.function = ndlc_t1_timeout;
-
-       init_timer(&ndlc->t2_timer);
-       ndlc->t2_timer.data = (unsigned long)ndlc;
-       ndlc->t2_timer.function = ndlc_t2_timeout;
+       timer_setup(&ndlc->t1_timer, ndlc_t1_timeout, 0);
+       timer_setup(&ndlc->t2_timer, ndlc_t2_timeout, 0);
 
        skb_queue_head_init(&ndlc->rcv_q);
        skb_queue_head_init(&ndlc->send_q);
index 56f2112e0cd840d789ec23b8db8ba1f2a52103ae..f55d082ace71558c8bf23d1813d70da18c9c5a0d 100644 (file)
@@ -677,7 +677,7 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
 }
 EXPORT_SYMBOL(st_nci_se_io);
 
-static void st_nci_se_wt_timeout(unsigned long data)
+static void st_nci_se_wt_timeout(struct timer_list *t)
 {
        /*
         * No answer from the secure element
@@ -690,7 +690,7 @@ static void st_nci_se_wt_timeout(unsigned long data)
         */
        /* hardware reset managed through VCC_UICC_OUT power supply */
        u8 param = 0x01;
-       struct st_nci_info *info = (struct st_nci_info *) data;
+       struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
 
        pr_debug("\n");
 
@@ -708,9 +708,10 @@ static void st_nci_se_wt_timeout(unsigned long data)
        info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
 }
 
-static void st_nci_se_activation_timeout(unsigned long data)
+static void st_nci_se_activation_timeout(struct timer_list *t)
 {
-       struct st_nci_info *info = (struct st_nci_info *) data;
+       struct st_nci_info *info = from_timer(info, t,
+                                             se_info.se_active_timer);
 
        pr_debug("\n");
 
@@ -725,15 +726,11 @@ int st_nci_se_init(struct nci_dev *ndev, struct st_nci_se_status *se_status)
 
        init_completion(&info->se_info.req_completion);
        /* initialize timers */
-       init_timer(&info->se_info.bwi_timer);
-       info->se_info.bwi_timer.data = (unsigned long)info;
-       info->se_info.bwi_timer.function = st_nci_se_wt_timeout;
+       timer_setup(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 0);
        info->se_info.bwi_active = false;
 
-       init_timer(&info->se_info.se_active_timer);
-       info->se_info.se_active_timer.data = (unsigned long)info;
-       info->se_info.se_active_timer.function =
-                       st_nci_se_activation_timeout;
+       timer_setup(&info->se_info.se_active_timer,
+                   st_nci_se_activation_timeout, 0);
        info->se_info.se_active = false;
 
        info->se_info.xch_error = false;
index 3a98563d4a121ddc99223ca367ee44a51f8fd92b..4bed9e842db38126859d74d4d585dee66ea80d33 100644 (file)
@@ -252,7 +252,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
 }
 EXPORT_SYMBOL(st21nfca_hci_se_io);
 
-static void st21nfca_se_wt_timeout(unsigned long data)
+static void st21nfca_se_wt_timeout(struct timer_list *t)
 {
        /*
         * No answer from the secure element
@@ -265,7 +265,8 @@ static void st21nfca_se_wt_timeout(unsigned long data)
         */
        /* hardware reset managed through VCC_UICC_OUT power supply */
        u8 param = 0x01;
-       struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+       struct st21nfca_hci_info *info = from_timer(info, t,
+                                                   se_info.bwi_timer);
 
        pr_debug("\n");
 
@@ -283,9 +284,10 @@ static void st21nfca_se_wt_timeout(unsigned long data)
        info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
 }
 
-static void st21nfca_se_activation_timeout(unsigned long data)
+static void st21nfca_se_activation_timeout(struct timer_list *t)
 {
-       struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+       struct st21nfca_hci_info *info = from_timer(info, t,
+                                                   se_info.se_active_timer);
 
        pr_debug("\n");
 
@@ -392,14 +394,11 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev)
 
        init_completion(&info->se_info.req_completion);
        /* initialize timers */
-       init_timer(&info->se_info.bwi_timer);
-       info->se_info.bwi_timer.data = (unsigned long)info;
-       info->se_info.bwi_timer.function = st21nfca_se_wt_timeout;
+       timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
        info->se_info.bwi_active = false;
 
-       init_timer(&info->se_info.se_active_timer);
-       info->se_info.se_active_timer.data = (unsigned long)info;
-       info->se_info.se_active_timer.function = st21nfca_se_activation_timeout;
+       timer_setup(&info->se_info.se_active_timer,
+                   st21nfca_se_activation_timeout, 0);
        info->se_info.se_active = false;
 
        info->se_info.count_pipes = 0;
index a89243c9fdd33acf6a583b0004513bbf45474001..e51b581fd102e8db1f0b0a0ed597f789a4ba9929 100644 (file)
@@ -1,3 +1,4 @@
 source "drivers/ntb/hw/amd/Kconfig"
 source "drivers/ntb/hw/idt/Kconfig"
 source "drivers/ntb/hw/intel/Kconfig"
+source "drivers/ntb/hw/mscc/Kconfig"
index 87332c3905f075dedd86ae7b86ea6ffd5dcb8d23..923c442db750a16caa3e1bad74cc168ecb9fcfc0 100644 (file)
@@ -1,3 +1,4 @@
 obj-$(CONFIG_NTB_AMD)  += amd/
 obj-$(CONFIG_NTB_IDT)  += idt/
 obj-$(CONFIG_NTB_INTEL)        += intel/
+obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
index d44d7ef38fe88fef82b7ff5420e117cd73d43edd..0cd79f367f7cc51d962679a236cfe56beec66ff9 100644 (file)
@@ -2628,35 +2628,35 @@ static void idt_pci_remove(struct pci_dev *pdev)
 /*
  * IDT PCIe-switch models ports configuration structures
  */
-static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
        .name = "89HPES24NT6AG2",
        .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
 };
-static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
        .name = "89HPES32NT8AG2",
        .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
 };
-static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
        .name = "89HPES32NT8BG2",
        .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
 };
-static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
        .name = "89HPES12NT12G2",
        .port_cnt = 3, .ports = {0, 8, 16}
 };
-static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
        .name = "89HPES16NT16G2",
        .port_cnt = 4, .ports = {0, 8, 12, 16}
 };
-static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
        .name = "89HPES24NT24G2",
        .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
 };
-static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
        .name = "89HPES32NT24AG2",
        .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
 };
-static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
        .name = "89HPES32NT24BG2",
        .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
 };
index 2557e2c05b90c8ca770d176f0cde26162b9a2df5..4de074a86073604abc59353fb509c55f5ef0b4cf 100644 (file)
@@ -1742,89 +1742,18 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
 {
        struct pci_dev *pdev;
        void __iomem *mmio;
-       resource_size_t bar_size;
        phys_addr_t bar_addr;
-       int b2b_bar;
-       u8 bar_sz;
 
        pdev = ndev->ntb.pdev;
        mmio = ndev->self_mmio;
 
-       if (ndev->b2b_idx == UINT_MAX) {
-               dev_dbg(&pdev->dev, "not using b2b mw\n");
-               b2b_bar = 0;
-               ndev->b2b_off = 0;
-       } else {
-               b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
-               if (b2b_bar < 0)
-                       return -EIO;
-
-               dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
-
-               bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
-
-               dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
-
-               if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
-                       dev_dbg(&pdev->dev, "b2b using first half of bar\n");
-                       ndev->b2b_off = bar_size >> 1;
-               } else if (bar_size >= XEON_B2B_MIN_SIZE) {
-                       dev_dbg(&pdev->dev, "b2b using whole bar\n");
-                       ndev->b2b_off = 0;
-                       --ndev->mw_count;
-               } else {
-                       dev_dbg(&pdev->dev, "b2b bar size is too small\n");
-                       return -EIO;
-               }
-       }
-
-       /*
-        * Reset the secondary bar sizes to match the primary bar sizes,
-        * except disable or halve the size of the b2b secondary bar.
-        */
-       pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
-       dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
-       if (b2b_bar == 1) {
-               if (ndev->b2b_off)
-                       bar_sz -= 1;
-               else
-                       bar_sz = 0;
-       }
-
-       pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
-       pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
-       dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
-
-       pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
-       dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
-       if (b2b_bar == 2) {
-               if (ndev->b2b_off)
-                       bar_sz -= 1;
-               else
-                       bar_sz = 0;
-       }
-
-       pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
-       pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
-       dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
-
-       /* SBAR01 hit by first part of the b2b bar */
-       if (b2b_bar == 0)
-               bar_addr = addr->bar0_addr;
-       else if (b2b_bar == 1)
-               bar_addr = addr->bar2_addr64;
-       else if (b2b_bar == 2)
-               bar_addr = addr->bar4_addr64;
-       else
-               return -EIO;
-
        /* setup incoming bar limits == base addrs (zero length windows) */
-       bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
+       bar_addr = addr->bar2_addr64;
        iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
        bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
        dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
 
-       bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+       bar_addr = addr->bar4_addr64;
        iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
        bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
        dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
diff --git a/drivers/ntb/hw/mscc/Kconfig b/drivers/ntb/hw/mscc/Kconfig
new file mode 100644 (file)
index 0000000..013ed67
--- /dev/null
@@ -0,0 +1,9 @@
+config NTB_SWITCHTEC
+       tristate "MicroSemi Switchtec Non-Transparent Bridge Support"
+       select PCI_SW_SWITCHTEC
+       help
+        Enables NTB support for Switchtec PCI switches. This also
+        selects the Switchtec management driver as they share the same
+        hardware interface.
+
+        If unsure, say N.
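With this Kconfig entry in place, the driver builds once the NTB subsystem is enabled; the select pulls in the Switchtec management driver (PCI_SW_SWITCHTEC) automatically. A plausible .config fragment for a module build (an assumption, not part of the commit):

	CONFIG_NTB=m
	CONFIG_NTB_SWITCHTEC=m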
diff --git a/drivers/ntb/hw/mscc/Makefile b/drivers/ntb/hw/mscc/Makefile
new file mode 100644 (file)
index 0000000..064686e
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
new file mode 100644 (file)
index 0000000..afe8ed6
--- /dev/null
@@ -0,0 +1,1216 @@
+/*
+ * Microsemi Switchtec(tm) PCIe Management Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/switchtec.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/ntb.h>
+
+MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Microsemi Corporation");
+
+static ulong max_mw_size = SZ_2M;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size,
+       "Max memory window size reported to the upper layer");
+
+static bool use_lut_mws;
+module_param(use_lut_mws, bool, 0644);
+MODULE_PARM_DESC(use_lut_mws,
+                "Enable the use of the LUT based memory windows");
+
+#ifndef ioread64
+#ifdef readq
+#define ioread64 readq
+#else
+#define ioread64 _ioread64
+static inline u64 _ioread64(void __iomem *mmio)
+{
+       u64 low, high;
+
+       low = ioread32(mmio);
+       high = ioread32(mmio + sizeof(u32));
+       return low | (high << 32);
+}
+#endif
+#endif
+
+#ifndef iowrite64
+#ifdef writeq
+#define iowrite64 writeq
+#else
+#define iowrite64 _iowrite64
+static inline void _iowrite64(u64 val, void __iomem *mmio)
+{
+       iowrite32(val, mmio);
+       iowrite32(val >> 32, mmio + sizeof(u32));
+}
+#endif
+#endif
+
+#define SWITCHTEC_NTB_MAGIC 0x45CC0001
+#define MAX_MWS     128
+
+struct shared_mw {
+       u32 magic;
+       u32 link_sta;
+       u32 partition_id;
+       u64 mw_sizes[MAX_MWS];
+       u32 spad[128];
+};
+
+#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
+#define LUT_SIZE SZ_64K
+
+struct switchtec_ntb {
+       struct ntb_dev ntb;
+       struct switchtec_dev *stdev;
+
+       int self_partition;
+       int peer_partition;
+
+       int doorbell_irq;
+       int message_irq;
+
+       struct ntb_info_regs __iomem *mmio_ntb;
+       struct ntb_ctrl_regs __iomem *mmio_ctrl;
+       struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
+       struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
+       struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
+       struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
+
+       struct shared_mw *self_shared;
+       struct shared_mw __iomem *peer_shared;
+       dma_addr_t self_shared_dma;
+
+       u64 db_mask;
+       u64 db_valid_mask;
+       int db_shift;
+       int db_peer_shift;
+
+       /* synchronize rmw access of db_mask and hw reg */
+       spinlock_t db_mask_lock;
+
+       int nr_direct_mw;
+       int nr_lut_mw;
+       int direct_mw_to_bar[MAX_DIRECT_MW];
+
+       int peer_nr_direct_mw;
+       int peer_nr_lut_mw;
+       int peer_direct_mw_to_bar[MAX_DIRECT_MW];
+
+       bool link_is_up;
+       enum ntb_speed link_speed;
+       enum ntb_width link_width;
+};
+
+static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
+{
+       return container_of(ntb, struct switchtec_ntb, ntb);
+}
+
+static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
+                                struct ntb_ctrl_regs __iomem *ctl,
+                                u32 op, int wait_status)
+{
+       static const char * const op_text[] = {
+               [NTB_CTRL_PART_OP_LOCK] = "lock",
+               [NTB_CTRL_PART_OP_CFG] = "configure",
+               [NTB_CTRL_PART_OP_RESET] = "reset",
+       };
+
+       int i;
+       u32 ps;
+       int status;
+
+       switch (op) {
+       case NTB_CTRL_PART_OP_LOCK:
+               status = NTB_CTRL_PART_STATUS_LOCKING;
+               break;
+       case NTB_CTRL_PART_OP_CFG:
+               status = NTB_CTRL_PART_STATUS_CONFIGURING;
+               break;
+       case NTB_CTRL_PART_OP_RESET:
+               status = NTB_CTRL_PART_STATUS_RESETTING;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       iowrite32(op, &ctl->partition_op);
+
+       for (i = 0; i < 1000; i++) {
+               if (msleep_interruptible(50) != 0) {
+                       iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
+                       return -EINTR;
+               }
+
+               ps = ioread32(&ctl->partition_status) & 0xFFFF;
+
+               if (ps != status)
+                       break;
+       }
+
+       if (ps == wait_status)
+               return 0;
+
+       if (ps == status) {
+               dev_err(&sndev->stdev->dev,
+                       "Timed out while performing %s (%d). (%08x)",
+                       op_text[op], op,
+                       ioread32(&ctl->partition_status));
+
+               return -ETIMEDOUT;
+       }
+
+       return -EIO;
+}
+
+static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
+                                 u32 val)
+{
+       if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
+               return -EINVAL;
+
+       iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
+
+       return 0;
+}
+
+static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+       int nr_direct_mw = sndev->peer_nr_direct_mw;
+       int nr_lut_mw = sndev->peer_nr_lut_mw - 1;
+
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       if (!use_lut_mws)
+               nr_lut_mw = 0;
+
+       return nr_direct_mw + nr_lut_mw;
+}
+
+static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
+{
+       return mw_idx - sndev->nr_direct_mw + 1;
+}
+
+static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
+{
+       return mw_idx - sndev->peer_nr_direct_mw + 1;
+}
+
+static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
+                                     int widx, resource_size_t *addr_align,
+                                     resource_size_t *size_align,
+                                     resource_size_t *size_max)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+       int lut;
+       resource_size_t size;
+
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       lut = widx >= sndev->peer_nr_direct_mw;
+       size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
+
+       if (size == 0)
+               return -EINVAL;
+
+       if (addr_align)
+               *addr_align = lut ? size : SZ_4K;
+
+       if (size_align)
+               *size_align = lut ? size : SZ_4K;
+
+       if (size_max)
+               *size_max = size;
+
+       return 0;
+}
+
+static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
+{
+       struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+       int bar = sndev->peer_direct_mw_to_bar[idx];
+       u32 ctl_val;
+
+       ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+       ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
+       iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+       iowrite32(0, &ctl->bar_entry[bar].win_size);
+       iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
+}
+
+static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
+{
+       struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+
+       iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
+}
+
+static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
+                                       dma_addr_t addr, resource_size_t size)
+{
+       int xlate_pos = ilog2(size);
+       int bar = sndev->peer_direct_mw_to_bar[idx];
+       struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+       u32 ctl_val;
+
+       ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+       ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
+
+       iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+       iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
+       iowrite64(sndev->self_partition | addr,
+                 &ctl->bar_entry[bar].xlate_addr);
+}
+
+static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
+                                    dma_addr_t addr, resource_size_t size)
+{
+       struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+
+       iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
+                 &ctl->lut_entry[peer_lut_index(sndev, idx)]);
+}
+
+static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+                                     dma_addr_t addr, resource_size_t size)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+       struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+       int xlate_pos = ilog2(size);
+       int nr_direct_mw = sndev->peer_nr_direct_mw;
+       int rc;
+
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap",
+               widx, pidx, &addr, &size);
+
+       if (widx >= switchtec_ntb_mw_count(ntb, pidx))
+               return -EINVAL;
+
+       if (xlate_pos < 12)
+               return -EINVAL;
+
+       rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
+                                  NTB_CTRL_PART_STATUS_LOCKED);
+       if (rc)
+               return rc;
+
+       if (addr == 0 || size == 0) {
+               if (widx < nr_direct_mw)
+                       switchtec_ntb_mw_clr_direct(sndev, widx);
+               else
+                       switchtec_ntb_mw_clr_lut(sndev, widx);
+       } else {
+               if (widx < nr_direct_mw)
+                       switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
+               else
+                       switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
+       }
+
+       rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+                                  NTB_CTRL_PART_STATUS_NORMAL);
+
+       if (rc == -EIO) {
+               dev_err(&sndev->stdev->dev,
+                       "Hardware reported an error configuring mw %d: %08x",
+                       widx, ioread32(&ctl->bar_error));
+
+               if (widx < nr_direct_mw)
+                       switchtec_ntb_mw_clr_direct(sndev, widx);
+               else
+                       switchtec_ntb_mw_clr_lut(sndev, widx);
+
+               switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+                                     NTB_CTRL_PART_STATUS_NORMAL);
+       }
+
+       return rc;
+}
+
+static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0);
+}
+
+static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
+                                        int idx, phys_addr_t *base,
+                                        resource_size_t *size)
+{
+       int bar = sndev->direct_mw_to_bar[idx];
+       size_t offset = 0;
+
+       if (bar < 0)
+               return -EINVAL;
+
+       if (idx == 0) {
+               /*
+                * This is the direct BAR shared with the LUTs
+                * which means the actual window will be offset
+                * by the size of all the LUT entries.
+                */
+
+               offset = LUT_SIZE * sndev->nr_lut_mw;
+       }
+
+       if (base)
+               *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
+
+       if (size) {
+               *size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
+               if (offset && *size > offset)
+                       *size = offset;
+
+               if (*size > max_mw_size)
+                       *size = max_mw_size;
+       }
+
+       return 0;
+}
+
+static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
+                                     int idx, phys_addr_t *base,
+                                     resource_size_t *size)
+{
+       int bar = sndev->direct_mw_to_bar[0];
+       int offset;
+
+       offset = LUT_SIZE * lut_index(sndev, idx);
+
+       if (base)
+               *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
+
+       if (size)
+               *size = LUT_SIZE;
+
+       return 0;
+}
+
+static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+                                         phys_addr_t *base,
+                                         resource_size_t *size)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (idx < sndev->nr_direct_mw)
+               return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
+       else if (idx < switchtec_ntb_peer_mw_count(ntb))
+               return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
+       else
+               return -EINVAL;
+}
+
+static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
+                                         int partition,
+                                         enum ntb_speed *speed,
+                                         enum ntb_width *width)
+{
+       struct switchtec_dev *stdev = sndev->stdev;
+
+       u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
+       u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
+
+       if (speed)
+               *speed = (linksta >> 16) & 0xF;
+
+       if (width)
+               *width = (linksta >> 20) & 0x3F;
+}
+
+static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
+{
+       enum ntb_speed self_speed, peer_speed;
+       enum ntb_width self_width, peer_width;
+
+       if (!sndev->link_is_up) {
+               sndev->link_speed = NTB_SPEED_NONE;
+               sndev->link_width = NTB_WIDTH_NONE;
+               return;
+       }
+
+       switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
+                                     &self_speed, &self_width);
+       switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
+                                     &peer_speed, &peer_width);
+
+       sndev->link_speed = min(self_speed, peer_speed);
+       sndev->link_width = min(self_width, peer_width);
+}
+
+enum {
+       LINK_MESSAGE = 0,
+       MSG_LINK_UP = 1,
+       MSG_LINK_DOWN = 2,
+       MSG_CHECK_LINK = 3,
+};
+
+static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
+{
+       int link_sta;
+       int old = sndev->link_is_up;
+
+       link_sta = sndev->self_shared->link_sta;
+       if (link_sta) {
+               u64 peer = ioread64(&sndev->peer_shared->magic);
+
+               if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
+                       link_sta = peer >> 32;
+               else
+                       link_sta = 0;
+       }
+
+       sndev->link_is_up = link_sta;
+       switchtec_ntb_set_link_speed(sndev);
+
+       if (link_sta != old) {
+               switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
+               ntb_link_event(&sndev->ntb);
+               dev_info(&sndev->stdev->dev, "ntb link %s",
+                        link_sta ? "up" : "down");
+       }
+}
+
+static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
+{
+       struct switchtec_ntb *sndev = stdev->sndev;
+
+       switchtec_ntb_check_link(sndev);
+}
+
+static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
+                                   enum ntb_speed *speed,
+                                   enum ntb_width *width)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (speed)
+               *speed = sndev->link_speed;
+       if (width)
+               *width = sndev->link_width;
+
+       return sndev->link_is_up;
+}
+
+static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
+                                    enum ntb_speed max_speed,
+                                    enum ntb_width max_width)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       dev_dbg(&sndev->stdev->dev, "enabling link");
+
+       sndev->self_shared->link_sta = 1;
+       switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
+
+       switchtec_ntb_check_link(sndev);
+
+       return 0;
+}
+
+static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       dev_dbg(&sndev->stdev->dev, "disabling link");
+
+       sndev->self_shared->link_sta = 0;
+       switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
+
+       switchtec_ntb_check_link(sndev);
+
+       return 0;
+}
+
+static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       return sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
+{
+       return 1;
+}
+
+static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (db_vector < 0 || db_vector > 1)
+               return 0;
+
+       return sndev->db_valid_mask;
+}
+
+static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
+{
+       u64 ret;
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
+
+       return ret & sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
+
+       return 0;
+}
+
+static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+       unsigned long irqflags;
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (db_bits & ~sndev->db_valid_mask)
+               return -EINVAL;
+
+       spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
+
+       sndev->db_mask |= db_bits << sndev->db_shift;
+       iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+
+       spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
+
+       return 0;
+}
+
+static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+       unsigned long irqflags;
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (db_bits & ~sndev->db_valid_mask)
+               return -EINVAL;
+
+       spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
+
+       sndev->db_mask &= ~(db_bits << sndev->db_shift);
+       iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+
+       spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
+
+       return 0;
+}
+
+static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
+                                     phys_addr_t *db_addr,
+                                     resource_size_t *db_size)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+       unsigned long offset;
+
+       offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
+               (unsigned long)sndev->stdev->mmio;
+
+       offset += sndev->db_shift / 8;
+
+       if (db_addr)
+               *db_addr = pci_resource_start(ntb->pdev, 0) + offset;
+       if (db_size)
+               *db_size = sizeof(u32);
+
+       return 0;
+}
+
+static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       iowrite64(db_bits << sndev->db_peer_shift,
+                 &sndev->mmio_self_dbmsg->odb);
+
+       return 0;
+}
+
+static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       return ARRAY_SIZE(sndev->self_shared->spad);
+}
+
+static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
+               return 0;
+
+       if (!sndev->self_shared)
+               return 0;
+
+       return sndev->self_shared->spad[idx];
+}
+
+static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
+               return -EINVAL;
+
+       if (!sndev->self_shared)
+               return -EIO;
+
+       sndev->self_shared->spad[idx] = val;
+
+       return 0;
+}
+
+static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
+                                       int sidx)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
+               return 0;
+
+       if (!sndev->peer_shared)
+               return 0;
+
+       return ioread32(&sndev->peer_shared->spad[sidx]);
+}
+
+static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
+                                        int sidx, u32 val)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
+               return -EINVAL;
+
+       if (!sndev->peer_shared)
+               return -EIO;
+
+       iowrite32(val, &sndev->peer_shared->spad[sidx]);
+
+       return 0;
+}
+
+static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
+                                       int sidx, phys_addr_t *spad_addr)
+{
+       struct switchtec_ntb *sndev = ntb_sndev(ntb);
+       unsigned long offset;
+
+       if (pidx != NTB_DEF_PEER_IDX)
+               return -EINVAL;
+
+       offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
+               (unsigned long)sndev->stdev->mmio;
+
+       if (spad_addr)
+               *spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
+
+       return 0;
+}
+
+static const struct ntb_dev_ops switchtec_ntb_ops = {
+       .mw_count               = switchtec_ntb_mw_count,
+       .mw_get_align           = switchtec_ntb_mw_get_align,
+       .mw_set_trans           = switchtec_ntb_mw_set_trans,
+       .peer_mw_count          = switchtec_ntb_peer_mw_count,
+       .peer_mw_get_addr       = switchtec_ntb_peer_mw_get_addr,
+       .link_is_up             = switchtec_ntb_link_is_up,
+       .link_enable            = switchtec_ntb_link_enable,
+       .link_disable           = switchtec_ntb_link_disable,
+       .db_valid_mask          = switchtec_ntb_db_valid_mask,
+       .db_vector_count        = switchtec_ntb_db_vector_count,
+       .db_vector_mask         = switchtec_ntb_db_vector_mask,
+       .db_read                = switchtec_ntb_db_read,
+       .db_clear               = switchtec_ntb_db_clear,
+       .db_set_mask            = switchtec_ntb_db_set_mask,
+       .db_clear_mask          = switchtec_ntb_db_clear_mask,
+       .db_read_mask           = switchtec_ntb_db_read_mask,
+       .peer_db_addr           = switchtec_ntb_peer_db_addr,
+       .peer_db_set            = switchtec_ntb_peer_db_set,
+       .spad_count             = switchtec_ntb_spad_count,
+       .spad_read              = switchtec_ntb_spad_read,
+       .spad_write             = switchtec_ntb_spad_write,
+       .peer_spad_read         = switchtec_ntb_peer_spad_read,
+       .peer_spad_write        = switchtec_ntb_peer_spad_write,
+       .peer_spad_addr         = switchtec_ntb_peer_spad_addr,
+};
+
+static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
+{
+       u64 part_map;
+
+       sndev->ntb.pdev = sndev->stdev->pdev;
+       sndev->ntb.topo = NTB_TOPO_SWITCH;
+       sndev->ntb.ops = &switchtec_ntb_ops;
+
+       sndev->self_partition = sndev->stdev->partition;
+
+       sndev->mmio_ntb = sndev->stdev->mmio_ntb;
+       part_map = ioread64(&sndev->mmio_ntb->ep_map);
+       part_map &= ~(1ULL << sndev->self_partition);
+       sndev->peer_partition = __ffs64(part_map);
+
+       dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)",
+               sndev->self_partition, sndev->stdev->partition_count,
+               part_map);
+
+       sndev->mmio_ctrl = (void __iomem *)sndev->mmio_ntb +
+               SWITCHTEC_NTB_REG_CTRL_OFFSET;
+       sndev->mmio_dbmsg = (void __iomem *)sndev->mmio_ntb +
+               SWITCHTEC_NTB_REG_DBMSG_OFFSET;
+
+       sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
+       sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
+       sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
+}
+
+static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
+{
+       int i;
+       int cnt = 0;
+
+       for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
+               u32 r = ioread32(&ctrl->bar_entry[i].ctl);
+
+               if (r & NTB_CTRL_BAR_VALID)
+                       map[cnt++] = i;
+       }
+
+       return cnt;
+}
+
+static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
+{
+       sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
+                                      sndev->mmio_self_ctrl);
+
+       sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
+       sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
+
+       dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut",
+               sndev->nr_direct_mw, sndev->nr_lut_mw);
+
+       sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
+                                           sndev->mmio_peer_ctrl);
+
+       sndev->peer_nr_lut_mw =
+               ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
+       sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
+
+       dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut",
+               sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
+}
+
+/*
+ * The switch hardware provides 64 doorbells, but they are shared
+ * among all partitions, so we split them in half (32 per partition).
+ * The message interrupts also occupy the top 4 doorbells of each
+ * half, which limits us to 28 usable doorbells per partition.
+ */
+static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
+{
+       sndev->db_valid_mask = 0x0FFFFFFF;
+
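+       /*
+        * The partition with the lower ID takes the low half of the
+        * doorbell register and rings the peer in the high half; the
+        * higher-numbered partition does the opposite, so the two sides
+        * never collide in the shared register.
+        */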
+       if (sndev->self_partition < sndev->peer_partition) {
+               sndev->db_shift = 0;
+               sndev->db_peer_shift = 32;
+       } else {
+               sndev->db_shift = 32;
+               sndev->db_peer_shift = 0;
+       }
+
+       sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
+       iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+       iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
+                 &sndev->mmio_self_dbmsg->odb_mask);
+}
+
+static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
+{
+       int i;
+       u32 msg_map = 0;
+
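+       /*
+        * Build the map with one byte per incoming message register: the
+        * low two bits select the message index and the upper bits carry
+        * the peer partition number.
+        */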
+       for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
+               int m = i | sndev->peer_partition << 2;
+
+               msg_map |= m << i * 8;
+       }
+
+       iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
+
+       for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
+               iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
+                         &sndev->mmio_self_dbmsg->imsg[i]);
+}
+
+static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
+{
+       int rc = 0;
+       u16 req_id;
+       u32 error;
+
+       req_id = ioread16(&sndev->mmio_ntb->requester_id);
+
+       if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
+               dev_err(&sndev->stdev->dev,
+                       "Not enough requester IDs available.");
+               return -EFAULT;
+       }
+
+       rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
+                                  NTB_CTRL_PART_OP_LOCK,
+                                  NTB_CTRL_PART_STATUS_LOCKED);
+       if (rc)
+               return rc;
+
+       iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
+                 &sndev->mmio_self_ctrl->partition_ctrl);
+
+       /*
+        * Root Complex Requester ID (which is 0:00.0)
+        */
+       iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
+                 &sndev->mmio_self_ctrl->req_id_table[0]);
+
+       /*
+        * Host Bridge Requester ID (as read from the mmap address)
+        */
+       iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
+                 &sndev->mmio_self_ctrl->req_id_table[1]);
+
+       rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
+                                  NTB_CTRL_PART_OP_CFG,
+                                  NTB_CTRL_PART_STATUS_NORMAL);
+       if (rc == -EIO) {
+               error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
+               dev_err(&sndev->stdev->dev,
+                       "Error setting up the requester ID table: %08x",
+                       error);
+       }
+
+       return rc;
+}
+
+static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
+{
+       int i;
+
+       memset(sndev->self_shared, 0, LUT_SIZE);
+       sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
+       sndev->self_shared->partition_id = sndev->stdev->partition;
+
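+       /*
+        * Record this side's memory window sizes in the shared page so the
+        * peer can read them through its mapping of this window.
+        */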
+       for (i = 0; i < sndev->nr_direct_mw; i++) {
+               int bar = sndev->direct_mw_to_bar[i];
+               resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
+
+               if (i == 0)
+                       sz = min_t(resource_size_t, sz,
+                                  LUT_SIZE * sndev->nr_lut_mw);
+
+               sndev->self_shared->mw_sizes[i] = sz;
+       }
+
+       for (i = 0; i < sndev->nr_lut_mw; i++) {
+               int idx = sndev->nr_direct_mw + i;
+
+               sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
+       }
+}
+
+static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
+{
+       struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+       int bar = sndev->direct_mw_to_bar[0];
+       u32 ctl_val;
+       int rc;
+
+       sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
+                                                LUT_SIZE,
+                                                &sndev->self_shared_dma,
+                                                GFP_KERNEL);
+       if (!sndev->self_shared) {
+               dev_err(&sndev->stdev->dev,
+                       "unable to allocate memory for shared mw");
+               return -ENOMEM;
+       }
+
+       switchtec_ntb_init_shared(sndev);
+
+       rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
+                                  NTB_CTRL_PART_STATUS_LOCKED);
+       if (rc)
+               goto unalloc_and_exit;
+
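+       /*
+        * Keep the low byte of the BAR control value, then enable the LUT
+        * window and encode the per-entry window size (log2 of LUT_SIZE)
+        * and the number of LUT entries in use in the upper fields.
+        */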
+       ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+       ctl_val &= 0xFF;
+       ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
+       ctl_val |= ilog2(LUT_SIZE) << 8;
+       ctl_val |= (sndev->nr_lut_mw - 1) << 14;
+       iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+
+       iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
+                  sndev->self_shared_dma),
+                 &ctl->lut_entry[0]);
+
+       rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+                                  NTB_CTRL_PART_STATUS_NORMAL);
+       if (rc) {
+               u32 bar_error, lut_error;
+
+               bar_error = ioread32(&ctl->bar_error);
+               lut_error = ioread32(&ctl->lut_error);
+               dev_err(&sndev->stdev->dev,
+                       "Error setting up shared MW: %08x / %08x",
+                       bar_error, lut_error);
+               goto unalloc_and_exit;
+       }
+
+       sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE);
+       if (!sndev->peer_shared) {
+               rc = -ENOMEM;
+               goto unalloc_and_exit;
+       }
+
+       dev_dbg(&sndev->stdev->dev, "Shared MW Ready");
+       return 0;
+
+unalloc_and_exit:
+       dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
+                         sndev->self_shared, sndev->self_shared_dma);
+
+       return rc;
+}
+
+static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
+{
+       if (sndev->peer_shared)
+               pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
+
+       if (sndev->self_shared)
+               dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
+                                 sndev->self_shared,
+                                 sndev->self_shared_dma);
+}
+
+static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
+{
+       struct switchtec_ntb *sndev = dev;
+
+       dev_dbg(&sndev->stdev->dev, "doorbell\n");
+
+       ntb_db_event(&sndev->ntb, 0);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
+{
+       int i;
+       struct switchtec_ntb *sndev = dev;
+
+       for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
+               u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
+
+               if (msg & NTB_DBMSG_IMSG_STATUS) {
+                       dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i,
+                               (u32)msg);
+                       iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
+
+                       if (i == LINK_MESSAGE)
+                               switchtec_ntb_check_link(sndev);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
+{
+       int i;
+       int rc;
+       int doorbell_irq = 0;
+       int message_irq = 0;
+       int event_irq;
+       int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
+
+       event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
+
+       while (doorbell_irq == event_irq)
+               doorbell_irq++;
+       while (message_irq == doorbell_irq ||
+              message_irq == event_irq)
+               message_irq++;
+
+       dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d",
+               event_irq, doorbell_irq, message_irq);
+
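+       /*
+        * Route all but the last four incoming vectors to the doorbell
+        * interrupt; the final four are used for messages (the top four
+        * doorbells are shared with the message interrupts, see
+        * switchtec_ntb_init_db()).
+        */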
+       for (i = 0; i < idb_vecs - 4; i++)
+               iowrite8(doorbell_irq,
+                        &sndev->mmio_self_dbmsg->idb_vec_map[i]);
+
+       for (; i < idb_vecs; i++)
+               iowrite8(message_irq,
+                        &sndev->mmio_self_dbmsg->idb_vec_map[i]);
+
+       sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
+       sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
+
+       rc = request_irq(sndev->doorbell_irq,
+                        switchtec_ntb_doorbell_isr, 0,
+                        "switchtec_ntb_doorbell", sndev);
+       if (rc)
+               return rc;
+
+       rc = request_irq(sndev->message_irq,
+                        switchtec_ntb_message_isr, 0,
+                        "switchtec_ntb_message", sndev);
+       if (rc) {
+               free_irq(sndev->doorbell_irq, sndev);
+               return rc;
+       }
+
+       return 0;
+}
+
+static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
+{
+       free_irq(sndev->doorbell_irq, sndev);
+       free_irq(sndev->message_irq, sndev);
+}
+
+static int switchtec_ntb_add(struct device *dev,
+                            struct class_interface *class_intf)
+{
+       struct switchtec_dev *stdev = to_stdev(dev);
+       struct switchtec_ntb *sndev;
+       int rc;
+
+       stdev->sndev = NULL;
+
+       if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
+               return -ENODEV;
+
+       if (stdev->partition_count != 2)
+               dev_warn(dev, "ntb driver only supports 2 partitions");
+
+       sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
+       if (!sndev)
+               return -ENOMEM;
+
+       sndev->stdev = stdev;
+       switchtec_ntb_init_sndev(sndev);
+       switchtec_ntb_init_mw(sndev);
+       switchtec_ntb_init_db(sndev);
+       switchtec_ntb_init_msgs(sndev);
+
+       rc = switchtec_ntb_init_req_id_table(sndev);
+       if (rc)
+               goto free_and_exit;
+
+       rc = switchtec_ntb_init_shared_mw(sndev);
+       if (rc)
+               goto free_and_exit;
+
+       rc = switchtec_ntb_init_db_msg_irq(sndev);
+       if (rc)
+               goto deinit_shared_and_exit;
+
+       rc = ntb_register_device(&sndev->ntb);
+       if (rc)
+               goto deinit_and_exit;
+
+       stdev->sndev = sndev;
+       stdev->link_notifier = switchtec_ntb_link_notification;
+       dev_info(dev, "NTB device registered");
+
+       return 0;
+
+deinit_and_exit:
+       switchtec_ntb_deinit_db_msg_irq(sndev);
+deinit_shared_and_exit:
+       switchtec_ntb_deinit_shared_mw(sndev);
+free_and_exit:
+       kfree(sndev);
+       dev_err(dev, "failed to register ntb device: %d", rc);
+       return rc;
+}
+
+static void switchtec_ntb_remove(struct device *dev,
+                                 struct class_interface *class_intf)
+{
+       struct switchtec_dev *stdev = to_stdev(dev);
+       struct switchtec_ntb *sndev = stdev->sndev;
+
+       if (!sndev)
+               return;
+
+       stdev->link_notifier = NULL;
+       stdev->sndev = NULL;
+       ntb_unregister_device(&sndev->ntb);
+       switchtec_ntb_deinit_db_msg_irq(sndev);
+       switchtec_ntb_deinit_shared_mw(sndev);
+       kfree(sndev);
+       dev_info(dev, "ntb device unregistered");
+}
+
+static struct class_interface switchtec_interface  = {
+       .add_dev = switchtec_ntb_add,
+       .remove_dev = switchtec_ntb_remove,
+};
+
+static int __init switchtec_ntb_init(void)
+{
+       switchtec_interface.class = switchtec_class;
+       return class_interface_register(&switchtec_interface);
+}
+module_init(switchtec_ntb_init);
+
+static void __exit switchtec_ntb_exit(void)
+{
+       class_interface_unregister(&switchtec_interface);
+}
+module_exit(switchtec_ntb_exit);
index f58d8e3053236ad4608e5552052214b0694a3a95..045e3dd4750e572a033dbb0cf2683296b2d0e6c7 100644 (file)
@@ -191,8 +191,6 @@ struct ntb_transport_qp {
 struct ntb_transport_mw {
        phys_addr_t phys_addr;
        resource_size_t phys_size;
-       resource_size_t xlat_align;
-       resource_size_t xlat_align_size;
        void __iomem *vbase;
        size_t xlat_size;
        size_t buff_size;
@@ -687,13 +685,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
        size_t xlat_size, buff_size;
+       resource_size_t xlat_align;
+       resource_size_t xlat_align_size;
        int rc;
 
        if (!size)
                return -EINVAL;
 
-       xlat_size = round_up(size, mw->xlat_align_size);
-       buff_size = round_up(size, mw->xlat_align);
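+       /*
+        * The alignment parameters may not be known until the NTB link is
+        * up, so query them here rather than caching them at probe time.
+        */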
+       rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
+                             &xlat_align_size, NULL);
+       if (rc)
+               return rc;
+
+       xlat_size = round_up(size, xlat_align_size);
+       buff_size = round_up(size, xlat_align);
 
        /* No need to re-setup */
        if (mw->xlat_size == xlat_size)
@@ -722,7 +727,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
         * is a requirement of the hardware. It is recommended to setup CMA
         * for BAR sizes equal or greater than 4MB.
         */
-       if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
+       if (!IS_ALIGNED(mw->dma_addr, xlat_align)) {
                dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
                        &mw->dma_addr);
                ntb_free_mw(nt, num_mw);
@@ -1104,11 +1109,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
        for (i = 0; i < mw_count; i++) {
                mw = &nt->mw_vec[i];
 
-               rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
-                                     &mw->xlat_align_size, NULL);
-               if (rc)
-                       goto err1;
-
                rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
                                          &mw->phys_size);
                if (rc)
index 759f772fa00c6a3970eacaaa05de88155bcf6c8d..427112cf101aa814ee210cd77d59b97c8ff90f33 100644 (file)
@@ -108,8 +108,6 @@ MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)")
 struct perf_mw {
        phys_addr_t     phys_addr;
        resource_size_t phys_size;
-       resource_size_t xlat_align;
-       resource_size_t xlat_align_size;
        void __iomem    *vbase;
        size_t          xlat_size;
        size_t          buf_size;
@@ -472,13 +470,20 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
 {
        struct perf_mw *mw = &perf->mw;
        size_t xlat_size, buf_size;
+       resource_size_t xlat_align;
+       resource_size_t xlat_align_size;
        int rc;
 
        if (!size)
                return -EINVAL;
 
-       xlat_size = round_up(size, mw->xlat_align_size);
-       buf_size = round_up(size, mw->xlat_align);
+       rc = ntb_mw_get_align(perf->ntb, PIDX, 0, &xlat_align,
+                             &xlat_align_size, NULL);
+       if (rc)
+               return rc;
+
+       xlat_size = round_up(size, xlat_align_size);
+       buf_size = round_up(size, xlat_align);
 
        if (mw->xlat_size == xlat_size)
                return 0;
@@ -567,11 +572,6 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
 
        mw = &perf->mw;
 
-       rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
-                             &mw->xlat_align_size, NULL);
-       if (rc)
-               return rc;
-
        rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
        if (rc)
                return rc;
index 938a18bcfc3f85b5b1acd0d66b54f20dbaf96c78..3f5a92bae6f8e4ecea11a865cbee43b5473ac052 100644 (file)
@@ -107,9 +107,9 @@ struct pp_ctx {
 
 static struct dentry *pp_debugfs_dir;
 
-static void pp_ping(unsigned long ctx)
+static void pp_ping(struct timer_list *t)
 {
-       struct pp_ctx *pp = (void *)ctx;
+       struct pp_ctx *pp = from_timer(pp, t, db_timer);
        unsigned long irqflags;
        u64 db_bits, db_mask;
        u32 spad_rd, spad_wr;
@@ -153,7 +153,7 @@ static void pp_link_event(void *ctx)
 
        if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
                dev_dbg(&pp->ntb->dev, "link is up\n");
-               pp_ping((unsigned long)pp);
+               pp_ping(&pp->db_timer);
        } else {
                dev_dbg(&pp->ntb->dev, "link is down\n");
                del_timer(&pp->db_timer);
@@ -252,7 +252,7 @@ static int pp_probe(struct ntb_client *client,
        pp->db_bits = 0;
        atomic_set(&pp->count, 0);
        spin_lock_init(&pp->db_lock);
-       setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+       timer_setup(&pp->db_timer, pp_ping, 0);
        pp->db_delay = msecs_to_jiffies(delay_ms);
 
        rc = ntb_set_ctx(ntb, pp, &pp_ops);
index a69815c45ce6f2137d9f409bad0b8742fa530df4..91526a986caab7cc69c328df15a145c14f10899b 100644 (file)
@@ -753,9 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
 
        phys_addr_t base;
        resource_size_t mw_size;
-       resource_size_t align_addr;
-       resource_size_t align_size;
-       resource_size_t max_size;
+       resource_size_t align_addr = 0;
+       resource_size_t align_size = 0;
+       resource_size_t max_size = 0;
 
        buf_size = min_t(size_t, size, 512);
 
index f2e649ff746f546b153d5ff1f64dc70958490c56..26618ba8f92a55e8d42db822b507bb8e8827e49f 100644 (file)
@@ -761,10 +761,10 @@ EXPORT_SYMBOL(of_find_node_opts_by_path);
 
 /**
  *     of_find_node_by_name - Find a node by its "name" property
- *     @from:  The node to start searching from or NULL, the node
+ *     @from:  The node to start searching from or NULL; the node
  *             you pass will not be searched, only the next one
- *             will; typically, you pass what the previous call
- *             returned. of_node_put() will be called on it
+ *             will. Typically, you pass what the previous call
+ *             returned. of_node_put() will be called on @from.
  *     @name:  The name string to match against
  *
  *     Returns a node pointer with refcount incremented, use
index e9ec931f5b9a565c7963dc8f3cf1b1c686effa23..a7b1cb6c2f657798a103f6f55acf3ef6f265377c 100644 (file)
@@ -374,7 +374,7 @@ int of_pci_map_rid(struct device_node *np, u32 rid,
 
                pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
                        np, map_name, map_mask, rid_base, out_base,
-                       rid_len, rid, *id_out);
+                       rid_len, rid, masked_rid - rid_base + out_base);
                return 0;
        }
 
index 3031fc2f18f6fd6b8bb5ae8bd17b9c72ed733b4f..32389acfa6164eb69dcd014f18ce12e58cb90050 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+DTC_FLAGS_testcases := -Wno-interrupts_property
 obj-y += testcases.dtb.o
 
 targets += testcases.dtb testcases.dtb.S
index ce49463d9d32b69bd6374ac3ebecdfe08c75b020..55fe0ee20109fd68e7bc663f8f00f78618a5abd6 100644 (file)
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /dts-v1/;
+/plugin/;
+
 / {
        testcase-data {
                changeset {
 #include "tests-match.dtsi"
 #include "tests-platform.dtsi"
 #include "tests-overlay.dtsi"
-
-/*
- * phandle fixup data - generated by dtc patches that aren't upstream.
- * This data must be regenerated whenever phandle references are modified in
- * the testdata tree.
- *
- * The format of this data may be subject to change. For the time being consider
- * this a kernel-internal data format.
- */
-/ { __local_fixups__ {
-       testcase-data {
-               phandle-tests {
-                       consumer-a {
-                               phandle-list = <0x00000000 0x00000008
-                                               0x00000018 0x00000028
-                                               0x00000034 0x00000038>;
-                               phandle-list-bad-args = <0x00000000 0x0000000c>;
-                       };
-               };
-               interrupts {
-                       intmap0 {
-                               interrupt-map = <0x00000004 0x00000010
-                                                0x00000024 0x00000034>;
-                       };
-                       intmap1 {
-                               interrupt-map = <0x0000000c>;
-                       };
-                       interrupts0 {
-                               interrupt-parent = <0x00000000>;
-                       };
-                       interrupts1 {
-                               interrupt-parent = <0x00000000>;
-                       };
-                       interrupts-extended0 {
-                               interrupts-extended = <0x00000000 0x00000008
-                                                      0x00000018 0x00000024
-                                                      0x0000002c 0x00000034
-                                                      0x0000003c>;
-                       };
-               };
-               testcase-device1 {
-                       interrupt-parent = <0x00000000>;
-               };
-               testcase-device2 {
-                       interrupt-parent = <0x00000000>;
-               };
-               overlay2 {
-                       fragment@0 {
-                               target = <0x00000000>;
-                       };
-               };
-               overlay3 {
-                       fragment@0 {
-                               target = <0x00000000>;
-                       };
-               };
-               overlay4 {
-                       fragment@0 {
-                               target = <0x00000000>;
-                       };
-               };
-       };
-}; };
index 90944667cceada37ea3e9176c9ac37f5413bc649..bda151788f3f723198be6c3401393f3876f5babf 100644 (file)
@@ -80,15 +80,6 @@ config XEN_PCIDEV_FRONTEND
           The PCI device frontend driver allows the kernel to import arbitrary
           PCI devices from a PCI backend to support PCI driver domains.
 
-config HT_IRQ
-       bool "Interrupts on hypertransport devices"
-       default y
-       depends on PCI && X86_LOCAL_APIC
-       help
-          This allows native hypertransport devices to use interrupts.
-
-          If unsure say Y.
-
 config PCI_ATS
        bool
 
index 3d5e047f0a3284e1859fcd14dfd7936c2a6424b7..c7819b973df7e4bbd07cd49e3bf29eea92eafa07 100644 (file)
@@ -21,9 +21,6 @@ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
 # Build the PCI MSI interrupt support
 obj-$(CONFIG_PCI_MSI) += msi.o
 
-# Build the Hypertransport interrupt support
-obj-$(CONFIG_HT_IRQ) += htirq.o
-
 obj-$(CONFIG_PCI_ATS) += ats.o
 obj-$(CONFIG_PCI_IOV) += iov.o
 
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
deleted file mode 100644 (file)
index bb88c26..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * File:       htirq.c
- * Purpose:    Hypertransport Interrupt Capability
- *
- * Copyright (C) 2006 Linux Networx
- * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
- */
-
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/htirq.h>
-
-/* Global ht irq lock.
- *
- * This is needed to serialize access to the data port in hypertransport
- * irq capability.
- *
- * With multiple simultaneous hypertransport irq devices it might pay
- * to make this more fine grained.  But start with simple, stupid, and correct.
- */
-static DEFINE_SPINLOCK(ht_irq_lock);
-
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
-       struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
-       unsigned long flags;
-
-       spin_lock_irqsave(&ht_irq_lock, flags);
-       if (cfg->msg.address_lo != msg->address_lo) {
-               pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
-               pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
-       }
-       if (cfg->msg.address_hi != msg->address_hi) {
-               pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
-               pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
-       }
-       if (cfg->update)
-               cfg->update(cfg->dev, irq, msg);
-       spin_unlock_irqrestore(&ht_irq_lock, flags);
-       cfg->msg = *msg;
-}
-
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
-       struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
-
-       *msg = cfg->msg;
-}
-
-void mask_ht_irq(struct irq_data *data)
-{
-       struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
-       struct ht_irq_msg msg = cfg->msg;
-
-       msg.address_lo |= 1;
-       write_ht_irq_msg(data->irq, &msg);
-}
-
-void unmask_ht_irq(struct irq_data *data)
-{
-       struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
-       struct ht_irq_msg msg = cfg->msg;
-
-       msg.address_lo &= ~1;
-       write_ht_irq_msg(data->irq, &msg);
-}
-
-/**
- * __ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- * @update: Function to be called when changing the htirq message
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
-{
-       int max_irq, pos, irq;
-       unsigned long flags;
-       u32 data;
-
-       pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
-       if (!pos)
-               return -EINVAL;
-
-       /* Verify the idx I want to use is in range */
-       spin_lock_irqsave(&ht_irq_lock, flags);
-       pci_write_config_byte(dev, pos + 2, 1);
-       pci_read_config_dword(dev, pos + 4, &data);
-       spin_unlock_irqrestore(&ht_irq_lock, flags);
-
-       max_irq = (data >> 16) & 0xff;
-       if (idx > max_irq)
-               return -EINVAL;
-
-       irq = arch_setup_ht_irq(idx, pos, dev, update);
-       if (irq > 0)
-               dev_dbg(&dev->dev, "irq %d for HT\n", irq);
-
-       return irq;
-}
-EXPORT_SYMBOL(__ht_create_irq);
-
-/**
- * ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- *
- * ht_create_irq needs to be called for all hypertransport devices
- * that generate irqs.
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int ht_create_irq(struct pci_dev *dev, int idx)
-{
-       return __ht_create_irq(dev, idx, NULL);
-}
-EXPORT_SYMBOL(ht_create_irq);
-
-/**
- * ht_destroy_irq - destroy an irq created with ht_create_irq
- * @irq: irq to be destroyed
- *
- * This reverses ht_create_irq removing the specified irq from
- * existence.  The irq should be free before this happens.
- */
-void ht_destroy_irq(unsigned int irq)
-{
-       arch_teardown_ht_irq(irq);
-}
-EXPORT_SYMBOL(ht_destroy_irq);
index da45dbea20ce6af4d15ed8b758c13b4437b093f8..730cc897b94da0251dac161192fd1bf1fc47b324 100644 (file)
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/switchtec.h>
 #include <linux/switchtec_ioctl.h>
 
 #include <linux/interrupt.h>
@@ -20,8 +21,6 @@
 #include <linux/fs.h>
 #include <linux/uaccess.h>
 #include <linux/poll.h>
-#include <linux/pci.h>
-#include <linux/cdev.h>
 #include <linux/wait.h>
 
 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -34,265 +33,10 @@ module_param(max_devices, int, 0644);
 MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
 
 static dev_t switchtec_devt;
-static struct class *switchtec_class;
 static DEFINE_IDA(switchtec_minor_ida);
 
-#define MICROSEMI_VENDOR_ID         0x11f8
-#define MICROSEMI_NTB_CLASSCODE     0x068000
-#define MICROSEMI_MGMT_CLASSCODE    0x058000
-
-#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
-#define SWITCHTEC_MAX_PFF_CSR 48
-
-#define SWITCHTEC_EVENT_OCCURRED BIT(0)
-#define SWITCHTEC_EVENT_CLEAR    BIT(0)
-#define SWITCHTEC_EVENT_EN_LOG   BIT(1)
-#define SWITCHTEC_EVENT_EN_CLI   BIT(2)
-#define SWITCHTEC_EVENT_EN_IRQ   BIT(3)
-#define SWITCHTEC_EVENT_FATAL    BIT(4)
-
-enum {
-       SWITCHTEC_GAS_MRPC_OFFSET       = 0x0000,
-       SWITCHTEC_GAS_TOP_CFG_OFFSET    = 0x1000,
-       SWITCHTEC_GAS_SW_EVENT_OFFSET   = 0x1800,
-       SWITCHTEC_GAS_SYS_INFO_OFFSET   = 0x2000,
-       SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
-       SWITCHTEC_GAS_PART_CFG_OFFSET   = 0x4000,
-       SWITCHTEC_GAS_NTB_OFFSET        = 0x10000,
-       SWITCHTEC_GAS_PFF_CSR_OFFSET    = 0x134000,
-};
-
-struct mrpc_regs {
-       u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
-       u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
-       u32 cmd;
-       u32 status;
-       u32 ret_value;
-} __packed;
-
-enum mrpc_status {
-       SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
-       SWITCHTEC_MRPC_STATUS_DONE = 2,
-       SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
-       SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
-};
-
-struct sw_event_regs {
-       u64 event_report_ctrl;
-       u64 reserved1;
-       u64 part_event_bitmap;
-       u64 reserved2;
-       u32 global_summary;
-       u32 reserved3[3];
-       u32 stack_error_event_hdr;
-       u32 stack_error_event_data;
-       u32 reserved4[4];
-       u32 ppu_error_event_hdr;
-       u32 ppu_error_event_data;
-       u32 reserved5[4];
-       u32 isp_error_event_hdr;
-       u32 isp_error_event_data;
-       u32 reserved6[4];
-       u32 sys_reset_event_hdr;
-       u32 reserved7[5];
-       u32 fw_exception_hdr;
-       u32 reserved8[5];
-       u32 fw_nmi_hdr;
-       u32 reserved9[5];
-       u32 fw_non_fatal_hdr;
-       u32 reserved10[5];
-       u32 fw_fatal_hdr;
-       u32 reserved11[5];
-       u32 twi_mrpc_comp_hdr;
-       u32 twi_mrpc_comp_data;
-       u32 reserved12[4];
-       u32 twi_mrpc_comp_async_hdr;
-       u32 twi_mrpc_comp_async_data;
-       u32 reserved13[4];
-       u32 cli_mrpc_comp_hdr;
-       u32 cli_mrpc_comp_data;
-       u32 reserved14[4];
-       u32 cli_mrpc_comp_async_hdr;
-       u32 cli_mrpc_comp_async_data;
-       u32 reserved15[4];
-       u32 gpio_interrupt_hdr;
-       u32 gpio_interrupt_data;
-       u32 reserved16[4];
-} __packed;
-
-enum {
-       SWITCHTEC_CFG0_RUNNING = 0x04,
-       SWITCHTEC_CFG1_RUNNING = 0x05,
-       SWITCHTEC_IMG0_RUNNING = 0x03,
-       SWITCHTEC_IMG1_RUNNING = 0x07,
-};
-
-struct sys_info_regs {
-       u32 device_id;
-       u32 device_version;
-       u32 firmware_version;
-       u32 reserved1;
-       u32 vendor_table_revision;
-       u32 table_format_version;
-       u32 partition_id;
-       u32 cfg_file_fmt_version;
-       u16 cfg_running;
-       u16 img_running;
-       u32 reserved2[57];
-       char vendor_id[8];
-       char product_id[16];
-       char product_revision[4];
-       char component_vendor[8];
-       u16 component_id;
-       u8 component_revision;
-} __packed;
-
-struct flash_info_regs {
-       u32 flash_part_map_upd_idx;
-
-       struct active_partition_info {
-               u32 address;
-               u32 build_version;
-               u32 build_string;
-       } active_img;
-
-       struct active_partition_info active_cfg;
-       struct active_partition_info inactive_img;
-       struct active_partition_info inactive_cfg;
-
-       u32 flash_length;
-
-       struct partition_info {
-               u32 address;
-               u32 length;
-       } cfg0;
-
-       struct partition_info cfg1;
-       struct partition_info img0;
-       struct partition_info img1;
-       struct partition_info nvlog;
-       struct partition_info vendor[8];
-};
-
-struct ntb_info_regs {
-       u8  partition_count;
-       u8  partition_id;
-       u16 reserved1;
-       u64 ep_map;
-       u16 requester_id;
-} __packed;
-
-struct part_cfg_regs {
-       u32 status;
-       u32 state;
-       u32 port_cnt;
-       u32 usp_port_mode;
-       u32 usp_pff_inst_id;
-       u32 vep_pff_inst_id;
-       u32 dsp_pff_inst_id[47];
-       u32 reserved1[11];
-       u16 vep_vector_number;
-       u16 usp_vector_number;
-       u32 port_event_bitmap;
-       u32 reserved2[3];
-       u32 part_event_summary;
-       u32 reserved3[3];
-       u32 part_reset_hdr;
-       u32 part_reset_data[5];
-       u32 mrpc_comp_hdr;
-       u32 mrpc_comp_data[5];
-       u32 mrpc_comp_async_hdr;
-       u32 mrpc_comp_async_data[5];
-       u32 dyn_binding_hdr;
-       u32 dyn_binding_data[5];
-       u32 reserved4[159];
-} __packed;
-
-enum {
-       SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
-       SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
-       SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
-       SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
-};
-
-struct pff_csr_regs {
-       u16 vendor_id;
-       u16 device_id;
-       u32 pci_cfg_header[15];
-       u32 pci_cap_region[48];
-       u32 pcie_cap_region[448];
-       u32 indirect_gas_window[128];
-       u32 indirect_gas_window_off;
-       u32 reserved[127];
-       u32 pff_event_summary;
-       u32 reserved2[3];
-       u32 aer_in_p2p_hdr;
-       u32 aer_in_p2p_data[5];
-       u32 aer_in_vep_hdr;
-       u32 aer_in_vep_data[5];
-       u32 dpc_hdr;
-       u32 dpc_data[5];
-       u32 cts_hdr;
-       u32 cts_data[5];
-       u32 reserved3[6];
-       u32 hotplug_hdr;
-       u32 hotplug_data[5];
-       u32 ier_hdr;
-       u32 ier_data[5];
-       u32 threshold_hdr;
-       u32 threshold_data[5];
-       u32 power_mgmt_hdr;
-       u32 power_mgmt_data[5];
-       u32 tlp_throttling_hdr;
-       u32 tlp_throttling_data[5];
-       u32 force_speed_hdr;
-       u32 force_speed_data[5];
-       u32 credit_timeout_hdr;
-       u32 credit_timeout_data[5];
-       u32 link_state_hdr;
-       u32 link_state_data[5];
-       u32 reserved4[174];
-} __packed;
-
-struct switchtec_dev {
-       struct pci_dev *pdev;
-       struct device dev;
-       struct cdev cdev;
-
-       int partition;
-       int partition_count;
-       int pff_csr_count;
-       char pff_local[SWITCHTEC_MAX_PFF_CSR];
-
-       void __iomem *mmio;
-       struct mrpc_regs __iomem *mmio_mrpc;
-       struct sw_event_regs __iomem *mmio_sw_event;
-       struct sys_info_regs __iomem *mmio_sys_info;
-       struct flash_info_regs __iomem *mmio_flash_info;
-       struct ntb_info_regs __iomem *mmio_ntb;
-       struct part_cfg_regs __iomem *mmio_part_cfg;
-       struct part_cfg_regs __iomem *mmio_part_cfg_all;
-       struct pff_csr_regs __iomem *mmio_pff_csr;
-
-       /*
-        * The mrpc mutex must be held when accessing the other
-        * mrpc_ fields, alive flag and stuser->state field
-        */
-       struct mutex mrpc_mutex;
-       struct list_head mrpc_queue;
-       int mrpc_busy;
-       struct work_struct mrpc_work;
-       struct delayed_work mrpc_timeout;
-       bool alive;
-
-       wait_queue_head_t event_wq;
-       atomic_t event_cnt;
-};
-
-static struct switchtec_dev *to_stdev(struct device *dev)
-{
-       return container_of(dev, struct switchtec_dev, dev);
-}
+struct class *switchtec_class;
+EXPORT_SYMBOL_GPL(switchtec_class);
 
 enum mrpc_state {
        MRPC_IDLE = 0,
@@ -1234,6 +978,49 @@ static const struct file_operations switchtec_fops = {
        .compat_ioctl = switchtec_dev_ioctl,
 };
 
+static void link_event_work(struct work_struct *work)
+{
+       struct switchtec_dev *stdev;
+
+       stdev = container_of(work, struct switchtec_dev, link_event_work);
+
+       if (stdev->link_notifier)
+               stdev->link_notifier(stdev);
+}
+
+static void check_link_state_events(struct switchtec_dev *stdev)
+{
+       int idx;
+       u32 reg;
+       int count;
+       int occurred = 0;
+
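+       /*
+        * Each port function's link-state header carries an event counter;
+        * a change since the last pass indicates a link event on that port.
+        */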
+       for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+               reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
+               dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
+               count = (reg >> 5) & 0xFF;
+
+               if (count != stdev->link_event_count[idx]) {
+                       occurred = 1;
+                       stdev->link_event_count[idx] = count;
+               }
+       }
+
+       if (occurred)
+               schedule_work(&stdev->link_event_work);
+}
+
+static void enable_link_state_events(struct switchtec_dev *stdev)
+{
+       int idx;
+
+       for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+               iowrite32(SWITCHTEC_EVENT_CLEAR |
+                         SWITCHTEC_EVENT_EN_IRQ,
+                         &stdev->mmio_pff_csr[idx].link_state_hdr);
+       }
+}
+
 static void stdev_release(struct device *dev)
 {
        struct switchtec_dev *stdev = to_stdev(dev);
@@ -1286,6 +1073,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
        stdev->mrpc_busy = 0;
        INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
        INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
+       INIT_WORK(&stdev->link_event_work, link_event_work);
        init_waitqueue_head(&stdev->event_wq);
        atomic_set(&stdev->event_cnt, 0);
 
@@ -1329,6 +1117,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
        if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
                return 0;
 
+       if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
+               return 0;
+
        dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
        hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
        iowrite32(hdr, hdr_reg);
@@ -1348,6 +1139,7 @@ static int mask_all_events(struct switchtec_dev *stdev, int eid)
                for (idx = 0; idx < stdev->pff_csr_count; idx++) {
                        if (!stdev->pff_local[idx])
                                continue;
+
                        count += mask_event(stdev, eid, idx);
                }
        } else {
@@ -1372,6 +1164,8 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
                iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
        }
 
+       check_link_state_events(stdev);
+
        for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
                event_count += mask_all_events(stdev, eid);
 
@@ -1481,6 +1275,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
        struct switchtec_dev *stdev;
        int rc;
 
+       if (pdev->class == MICROSEMI_NTB_CLASSCODE)
+               request_module_nowait("ntb_hw_switchtec");
+
        stdev = stdev_create(pdev);
        if (IS_ERR(stdev))
                return PTR_ERR(stdev);
@@ -1498,6 +1295,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
        iowrite32(SWITCHTEC_EVENT_CLEAR |
                  SWITCHTEC_EVENT_EN_IRQ,
                  &stdev->mmio_part_cfg->mrpc_comp_hdr);
+       enable_link_state_events(stdev);
 
        rc = cdev_device_add(&stdev->cdev, &stdev->dev);
        if (rc)
index 2f490930430d46a2f7056aa9b5576266d21a4937..93a5c7423d8093728a6021a4956ca8ee18c97162 100644 (file)
@@ -144,6 +144,7 @@ int pcmcia_badge4_init(struct sa1111_dev *dev)
                                 sa11xx_drv_pcmcia_add_one);
 }
 
+#ifndef MODULE
 static int __init pcmv_setup(char *s)
 {
        int v[4];
@@ -158,3 +159,4 @@ static int __init pcmv_setup(char *s)
 }
 
 __setup("pcmv=", pcmv_setup);
+#endif
index 09dac11337d10bf6abebb836a6db93b6abc5dd4b..2c745e8ccad6c190882725d8864d4f96ae2e68be 100644 (file)
@@ -93,12 +93,31 @@ config ASUS_LAPTOP
 
 config DELL_SMBIOS
        tristate
-       select DCDBAS
+
+config DELL_SMBIOS_WMI
+       tristate "Dell SMBIOS calling interface (WMI implementation)"
+       depends on ACPI_WMI
+       select DELL_WMI_DESCRIPTOR
+       select DELL_SMBIOS
+       ---help---
+       This provides an implementation of the Dell SMBIOS calling interface
+       that communicates over ACPI-WMI.
+
+       If you have a Dell computer from after 2007, you should say Y or M
+       here. If you aren't sure, it is safe to enable this; the module
+       simply won't load if it doesn't work on your computer.
+
+config DELL_SMBIOS_SMM
+       tristate "Dell SMBIOS calling interface (SMM implementation)"
+       depends on DCDBAS
+       select DELL_SMBIOS
        ---help---
-       This module provides common functions for kernel modules using
-       Dell SMBIOS.
+       This provides an implementation of the Dell SMBIOS calling interface
+       that communicates over SMI/SMM.
 
-       If you have a Dell laptop, say Y or M here.
+       If you have a Dell computer from 2017 or earlier, you should say Y
+       or M here. If you aren't sure, it is safe to enable this; the module
+       simply won't load if it doesn't work on your computer.
 
 config DELL_LAPTOP
        tristate "Dell Laptop Extras"
@@ -116,11 +135,12 @@ config DELL_LAPTOP
        laptops (except for some models covered by the Compal driver).
 
 config DELL_WMI
-       tristate "Dell WMI extras"
+       tristate "Dell WMI notifications"
        depends on ACPI_WMI
        depends on DMI
        depends on INPUT
        depends on ACPI_VIDEO || ACPI_VIDEO = n
+       select DELL_WMI_DESCRIPTOR
        select DELL_SMBIOS
        select INPUT_SPARSEKMAP
        ---help---
@@ -129,6 +149,10 @@ config DELL_WMI
          To compile this driver as a module, choose M here: the module will
          be called dell-wmi.
 
+config DELL_WMI_DESCRIPTOR
+       tristate
+       depends on ACPI_WMI
+
 config DELL_WMI_AIO
        tristate "WMI Hotkeys for Dell All-In-One series"
        depends on ACPI_WMI
@@ -426,7 +450,6 @@ config THINKPAD_ACPI_ALSA_SUPPORT
 config THINKPAD_ACPI_DEBUGFACILITIES
        bool "Maintainer debug facilities"
        depends on THINKPAD_ACPI
-       default n
        ---help---
          Enables extra stuff in the thinkpad-acpi which is completely useless
          for normal use.  Read the driver source to find out what it does.
@@ -437,7 +460,6 @@ config THINKPAD_ACPI_DEBUGFACILITIES
 config THINKPAD_ACPI_DEBUG
        bool "Verbose debug mode"
        depends on THINKPAD_ACPI
-       default n
        ---help---
          Enables extra debugging information, at the expense of a slightly
          increase in driver size.
@@ -447,7 +469,6 @@ config THINKPAD_ACPI_DEBUG
 config THINKPAD_ACPI_UNSAFE_LEDS
        bool "Allow control of important LEDs (unsafe)"
        depends on THINKPAD_ACPI
-       default n
        ---help---
          Overriding LED state on ThinkPads can mask important
          firmware alerts (like critical battery condition), or misled
@@ -515,7 +536,6 @@ config SENSORS_HDAPS
        tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
        depends on INPUT
        select INPUT_POLLDEV
-       default n
        help
          This driver provides support for the IBM Hard Drive Active Protection
          System (hdaps), which provides an accelerometer and other misc. data.
@@ -658,6 +678,18 @@ config WMI_BMOF
          To compile this driver as a module, choose M here: the module will
          be called wmi-bmof.
 
+config INTEL_WMI_THUNDERBOLT
+       tristate "Intel WMI thunderbolt force power driver"
+       depends on ACPI_WMI
+       ---help---
+         Say Y here if you want to be able to use the WMI interface on select
+         systems to force the power control of Intel Thunderbolt controllers.
+         This is useful for updating the firmware when devices are not plugged
+         into the controller.
+
+         To compile this driver as a module, choose M here: the module will
+         be called intel-wmi-thunderbolt.
+
 config MSI_WMI
        tristate "MSI WMI extras"
        depends on ACPI_WMI
@@ -763,7 +795,6 @@ config TOSHIBA_HAPS
 
 config TOSHIBA_WMI
        tristate "Toshiba WMI Hotkeys Driver (EXPERIMENTAL)"
-       default n
        depends on ACPI_WMI
        depends on INPUT
        select INPUT_SPARSEKMAP
@@ -785,7 +816,6 @@ config ACPI_CMPC
        depends on RFKILL || RFKILL=n
        select INPUT
        select BACKLIGHT_CLASS_DEVICE
-       default n
        help
          Support for Intel Classmate PC ACPI devices, including some
          keys as input device, backlight device, tablet and accelerometer
@@ -793,7 +823,7 @@ config ACPI_CMPC
 
 config INTEL_CHT_INT33FE
        tristate "Intel Cherry Trail ACPI INT33FE Driver"
-       depends on X86 && ACPI && I2C
+       depends on X86 && ACPI && I2C && REGULATOR
        ---help---
          This driver add support for the INT33FE ACPI device found on
          some Intel Cherry Trail devices.
@@ -804,6 +834,10 @@ config INTEL_CHT_INT33FE
          This driver instantiates i2c-clients for these, so that standard
         i2c drivers for these chips can bind to them.
 
+         If you enable this driver it is advised to also select
+         CONFIG_TYPEC_FUSB302=m, CONFIG_CHARGER_BQ24190=m and
+         CONFIG_BATTERY_MAX17042=m.
+
 config INTEL_INT0002_VGPIO
        tristate "Intel ACPI INT0002 Virtual GPIO driver"
        depends on GPIOLIB && ACPI
@@ -892,7 +926,6 @@ config INTEL_IPS
 
 config INTEL_IMR
        bool "Intel Isolated Memory Region support"
-       default n
        depends on X86_INTEL_QUARK && IOSF_MBI
        ---help---
          This option provides a means to manipulate Isolated Memory Regions.
@@ -1088,7 +1121,6 @@ config INTEL_PUNIT_IPC
 
 config INTEL_TELEMETRY
        tristate "Intel SoC Telemetry Driver"
-       default n
        depends on INTEL_PMC_IPC && INTEL_PUNIT_IPC && X86_64
        ---help---
          This driver provides interfaces to configure and use
@@ -1111,7 +1143,6 @@ config MLX_PLATFORM
 
 config MLX_CPLD_PLATFORM
        tristate "Mellanox platform hotplug driver support"
-       default n
        select HWMON
        select I2C
        ---help---
index f9e3ae683bbe3729757d36f19e02df3c2c5ea8f9..c32b34a724679435d134615c42a9c3f3855cf463 100644 (file)
@@ -13,8 +13,11 @@ obj-$(CONFIG_MSI_LAPTOP)     += msi-laptop.o
 obj-$(CONFIG_ACPI_CMPC)                += classmate-laptop.o
 obj-$(CONFIG_COMPAL_LAPTOP)    += compal-laptop.o
 obj-$(CONFIG_DELL_SMBIOS)      += dell-smbios.o
+obj-$(CONFIG_DELL_SMBIOS_WMI)  += dell-smbios-wmi.o
+obj-$(CONFIG_DELL_SMBIOS_SMM)  += dell-smbios-smm.o
 obj-$(CONFIG_DELL_LAPTOP)      += dell-laptop.o
 obj-$(CONFIG_DELL_WMI)         += dell-wmi.o
+obj-$(CONFIG_DELL_WMI_DESCRIPTOR)      += dell-wmi-descriptor.o
 obj-$(CONFIG_DELL_WMI_AIO)     += dell-wmi-aio.o
 obj-$(CONFIG_DELL_WMI_LED)     += dell-wmi-led.o
 obj-$(CONFIG_DELL_SMO8800)     += dell-smo8800.o
@@ -40,6 +43,7 @@ obj-$(CONFIG_PEAQ_WMI)                += peaq-wmi.o
 obj-$(CONFIG_SURFACE3_WMI)     += surface3-wmi.o
 obj-$(CONFIG_TOPSTAR_LAPTOP)   += topstar-laptop.o
 obj-$(CONFIG_WMI_BMOF)         += wmi-bmof.o
+obj-$(CONFIG_INTEL_WMI_THUNDERBOLT)    += intel-wmi-thunderbolt.o
 
 # toshiba_acpi must link after wmi to ensure that wmi devices are found
 # before toshiba_acpi initializes
index 48e1541dc8d4efa3cf24e9c0df5861aeaa431798..a32c5c00e0e7bdf9868e81c655f30f8fb6cc34be 100644 (file)
@@ -119,6 +119,7 @@ MODULE_LICENSE("GPL");
 #define ASUS_WMI_DEVID_BRIGHTNESS      0x00050012
 #define ASUS_WMI_DEVID_KBD_BACKLIGHT   0x00050021
 #define ASUS_WMI_DEVID_LIGHT_SENSOR    0x00050022 /* ?? */
+#define ASUS_WMI_DEVID_LIGHTBAR                0x00050025
 
 /* Misc */
 #define ASUS_WMI_DEVID_CAMERA          0x00060013
@@ -148,6 +149,7 @@ MODULE_LICENSE("GPL");
 #define ASUS_WMI_DSTS_BIOS_BIT         0x00040000
 #define ASUS_WMI_DSTS_BRIGHTNESS_MASK  0x000000FF
 #define ASUS_WMI_DSTS_MAX_BRIGTH_MASK  0x0000FF00
+#define ASUS_WMI_DSTS_LIGHTBAR_MASK    0x0000000F
 
 #define ASUS_FAN_DESC                  "cpu_fan"
 #define ASUS_FAN_MFUN                  0x13
@@ -222,10 +224,13 @@ struct asus_wmi {
        int tpd_led_wk;
        struct led_classdev kbd_led;
        int kbd_led_wk;
+       struct led_classdev lightbar_led;
+       int lightbar_led_wk;
        struct workqueue_struct *led_workqueue;
        struct work_struct tpd_led_work;
        struct work_struct kbd_led_work;
        struct work_struct wlan_led_work;
+       struct work_struct lightbar_led_work;
 
        struct asus_rfkill wlan;
        struct asus_rfkill bluetooth;
@@ -567,6 +572,48 @@ static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
        return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
 }
 
+static void lightbar_led_update(struct work_struct *work)
+{
+       struct asus_wmi *asus;
+       int ctrl_param;
+
+       asus = container_of(work, struct asus_wmi, lightbar_led_work);
+
+       ctrl_param = asus->lightbar_led_wk;
+       asus_wmi_set_devstate(ASUS_WMI_DEVID_LIGHTBAR, ctrl_param, NULL);
+}
+
+static void lightbar_led_set(struct led_classdev *led_cdev,
+                            enum led_brightness value)
+{
+       struct asus_wmi *asus;
+
+       asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
+
+       asus->lightbar_led_wk = !!value;
+       queue_work(asus->led_workqueue, &asus->lightbar_led_work);
+}
+
+static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
+{
+       struct asus_wmi *asus;
+       u32 result;
+
+       asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
+       asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
+
+       return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
+}
+
+static int lightbar_led_presence(struct asus_wmi *asus)
+{
+       u32 result;
+
+       asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
+
+       return result & ASUS_WMI_DSTS_PRESENCE_BIT;
+}
+
 static void asus_wmi_led_exit(struct asus_wmi *asus)
 {
        if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
@@ -575,6 +622,8 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
                led_classdev_unregister(&asus->tpd_led);
        if (!IS_ERR_OR_NULL(asus->wlan_led.dev))
                led_classdev_unregister(&asus->wlan_led);
+       if (!IS_ERR_OR_NULL(asus->lightbar_led.dev))
+               led_classdev_unregister(&asus->lightbar_led);
        if (asus->led_workqueue)
                destroy_workqueue(asus->led_workqueue);
 }
@@ -630,6 +679,20 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
 
                rv = led_classdev_register(&asus->platform_device->dev,
                                           &asus->wlan_led);
+               if (rv)
+                       goto error;
+       }
+
+       if (lightbar_led_presence(asus)) {
+               INIT_WORK(&asus->lightbar_led_work, lightbar_led_update);
+
+               asus->lightbar_led.name = "asus::lightbar";
+               asus->lightbar_led.brightness_set = lightbar_led_set;
+               asus->lightbar_led.brightness_get = lightbar_led_get;
+               asus->lightbar_led.max_brightness = 1;
+
+               rv = led_classdev_register(&asus->platform_device->dev,
+                                          &asus->lightbar_led);
        }
 
 error:
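Since the lightbar is registered through the LED class with max_brightness = 1, it should be reachable from userspace through the usual sysfs path once the driver binds. A minimal sketch, assuming the standard /sys/class/leds layout for the "asus::lightbar" classdev registered above:

/* Sketch: toggle the ASUS lightbar via the LED class sysfs interface. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* path follows from the classdev name "asus::lightbar" set above */
	int fd = open("/sys/class/leds/asus::lightbar/brightness", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {	/* any non-zero value maps to "on" in lightbar_led_set() */
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}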
index f42159fd20318beb15b6495fa9c3027e47326c2a..bf897b1832b188c24a92f33fdf74b33b25aeac43 100644 (file)
 #include "dell-rbtn.h"
 #include "dell-smbios.h"
 
-#define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
-#define GLOBAL_MIC_MUTE_ENABLE 0x0364
-#define GLOBAL_MIC_MUTE_DISABLE 0x0365
-#define KBD_LED_AC_TOKEN 0x0451
-
 struct quirk_entry {
        u8 touchpad_led;
 
@@ -85,6 +73,7 @@ static struct platform_driver platform_driver = {
        }
 };
 
+static struct calling_interface_buffer *buffer;
 static struct platform_device *platform_device;
 static struct backlight_device *dell_backlight_device;
 static struct rfkill *wifi_rfkill;
@@ -283,6 +272,27 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
        { }
 };
 
+void dell_set_arguments(u32 arg0, u32 arg1, u32 arg2, u32 arg3)
+{
+       memset(buffer, 0, sizeof(struct calling_interface_buffer));
+       buffer->input[0] = arg0;
+       buffer->input[1] = arg1;
+       buffer->input[2] = arg2;
+       buffer->input[3] = arg3;
+}
+
+int dell_send_request(u16 class, u16 select)
+{
+       int ret;
+
+       buffer->cmd_class = class;
+       buffer->cmd_select = select;
+       ret = dell_smbios_call(buffer);
+       if (ret != 0)
+               return ret;
+       return dell_smbios_error(buffer->output[0]);
+}
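For readers following the conversion below, this is the pattern every former dell_smbios_get_buffer()/dell_smbios_send_request() call site is rewritten to: stage the inputs, dispatch the class/select pair, then read results back from the shared buffer. A hypothetical helper (not part of the patch) that reads the wireless status word the same way dell_update_rfkill() does:

/* Hypothetical sketch only: mirrors how the converted call sites use the helpers. */
static int dell_read_wireless_status(int *status)
{
	int ret;

	dell_set_arguments(0, 0, 0, 0);				/* stage input[0..3] */
	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);	/* class 17 / select 11 */
	if (ret == 0)
		*status = buffer->output[1];			/* result comes back in the shared buffer */
	return ret;
}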
+
 /*
  * Derived from information in smbios-wireless-ctl:
  *
@@ -405,7 +415,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
 
 static int dell_rfkill_set(void *data, bool blocked)
 {
-       struct calling_interface_buffer *buffer;
        int disable = blocked ? 1 : 0;
        unsigned long radio = (unsigned long)data;
        int hwswitch_bit = (unsigned long)data - 1;
@@ -413,20 +422,16 @@ static int dell_rfkill_set(void *data, bool blocked)
        int status;
        int ret;
 
-       buffer = dell_smbios_get_buffer();
-
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
+       dell_set_arguments(0, 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+       if (ret)
+               return ret;
        status = buffer->output[1];
 
-       if (ret != 0)
-               goto out;
-
-       dell_smbios_clear_buffer();
-
-       buffer->input[0] = 0x2;
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
+       dell_set_arguments(0x2, 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+       if (ret)
+               return ret;
        hwswitch = buffer->output[1];
 
        /* If the hardware switch controls this radio, and the hardware
@@ -435,28 +440,19 @@ static int dell_rfkill_set(void *data, bool blocked)
            (status & BIT(0)) && !(status & BIT(16)))
                disable = 1;
 
-       dell_smbios_clear_buffer();
-
-       buffer->input[0] = (1 | (radio<<8) | (disable << 16));
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
-
- out:
-       dell_smbios_release_buffer();
-       return dell_smbios_error(ret);
+       dell_set_arguments(1 | (radio<<8) | (disable << 16), 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+       return ret;
 }
 
-/* Must be called with the buffer held */
 static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
-                                       int status,
-                                       struct calling_interface_buffer *buffer)
+                                       int status)
 {
        if (status & BIT(0)) {
                /* Has hw-switch, sync sw_state to BIOS */
                int block = rfkill_blocked(rfkill);
-               dell_smbios_clear_buffer();
-               buffer->input[0] = (1 | (radio << 8) | (block << 16));
-               dell_smbios_send_request(17, 11);
+               dell_set_arguments(1 | (radio << 8) | (block << 16), 0, 0, 0);
+               dell_send_request(CLASS_INFO, SELECT_RFKILL);
        } else {
                /* No hw-switch, sync BIOS state to sw_state */
                rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
@@ -472,32 +468,23 @@ static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
 
 static void dell_rfkill_query(struct rfkill *rfkill, void *data)
 {
-       struct calling_interface_buffer *buffer;
        int radio = ((unsigned long)data & 0xF);
        int hwswitch;
        int status;
        int ret;
 
-       buffer = dell_smbios_get_buffer();
-
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
+       dell_set_arguments(0, 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
        status = buffer->output[1];
 
        if (ret != 0 || !(status & BIT(0))) {
-               dell_smbios_release_buffer();
                return;
        }
 
-       dell_smbios_clear_buffer();
-
-       buffer->input[0] = 0x2;
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
+       dell_set_arguments(0x2, 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
        hwswitch = buffer->output[1];
 
-       dell_smbios_release_buffer();
-
        if (ret != 0)
                return;
 
@@ -513,27 +500,23 @@ static struct dentry *dell_laptop_dir;
 
 static int dell_debugfs_show(struct seq_file *s, void *data)
 {
-       struct calling_interface_buffer *buffer;
        int hwswitch_state;
        int hwswitch_ret;
        int status;
        int ret;
 
-       buffer = dell_smbios_get_buffer();
-
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
+       dell_set_arguments(0, 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+       if (ret)
+               return ret;
        status = buffer->output[1];
 
-       dell_smbios_clear_buffer();
-
-       buffer->input[0] = 0x2;
-       dell_smbios_send_request(17, 11);
-       hwswitch_ret = buffer->output[0];
+       dell_set_arguments(0x2, 0, 0, 0);
+       hwswitch_ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+       if (hwswitch_ret)
+               return hwswitch_ret;
        hwswitch_state = buffer->output[1];
 
-       dell_smbios_release_buffer();
-
        seq_printf(s, "return:\t%d\n", ret);
        seq_printf(s, "status:\t0x%X\n", status);
        seq_printf(s, "Bit 0 : Hardware switch supported:   %lu\n",
@@ -613,46 +596,36 @@ static const struct file_operations dell_debugfs_fops = {
 
 static void dell_update_rfkill(struct work_struct *ignored)
 {
-       struct calling_interface_buffer *buffer;
        int hwswitch = 0;
        int status;
        int ret;
 
-       buffer = dell_smbios_get_buffer();
-
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
+       dell_set_arguments(0, 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
        status = buffer->output[1];
 
        if (ret != 0)
-               goto out;
-
-       dell_smbios_clear_buffer();
+               return;
 
-       buffer->input[0] = 0x2;
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
+       dell_set_arguments(0x2, 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
 
        if (ret == 0 && (status & BIT(0)))
                hwswitch = buffer->output[1];
 
        if (wifi_rfkill) {
                dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
-               dell_rfkill_update_sw_state(wifi_rfkill, 1, status, buffer);
+               dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
        }
        if (bluetooth_rfkill) {
                dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
                                            hwswitch);
-               dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status,
-                                           buffer);
+               dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
        }
        if (wwan_rfkill) {
                dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
-               dell_rfkill_update_sw_state(wwan_rfkill, 3, status, buffer);
+               dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
        }
-
- out:
-       dell_smbios_release_buffer();
 }
 static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
 
@@ -696,7 +669,6 @@ static struct notifier_block dell_laptop_rbtn_notifier = {
 
 static int __init dell_setup_rfkill(void)
 {
-       struct calling_interface_buffer *buffer;
        int status, ret, whitelisted;
        const char *product;
 
@@ -712,11 +684,9 @@ static int __init dell_setup_rfkill(void)
        if (!force_rfkill && !whitelisted)
                return 0;
 
-       buffer = dell_smbios_get_buffer();
-       dell_smbios_send_request(17, 11);
-       ret = buffer->output[0];
+       dell_set_arguments(0, 0, 0, 0);
+       ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
        status = buffer->output[1];
-       dell_smbios_release_buffer();
 
        /* dell wireless info smbios call is not supported */
        if (ret != 0)
@@ -869,7 +839,6 @@ static void dell_cleanup_rfkill(void)
 
 static int dell_send_intensity(struct backlight_device *bd)
 {
-       struct calling_interface_buffer *buffer;
        struct calling_interface_token *token;
        int ret;
 
@@ -877,24 +846,17 @@ static int dell_send_intensity(struct backlight_device *bd)
        if (!token)
                return -ENODEV;
 
-       buffer = dell_smbios_get_buffer();
-       buffer->input[0] = token->location;
-       buffer->input[1] = bd->props.brightness;
-
+       dell_set_arguments(token->location, bd->props.brightness, 0, 0);
        if (power_supply_is_system_supplied() > 0)
-               dell_smbios_send_request(1, 2);
+               ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
        else
-               dell_smbios_send_request(1, 1);
-
-       ret = dell_smbios_error(buffer->output[0]);
+               ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
 
-       dell_smbios_release_buffer();
        return ret;
 }
 
 static int dell_get_intensity(struct backlight_device *bd)
 {
-       struct calling_interface_buffer *buffer;
        struct calling_interface_token *token;
        int ret;
 
@@ -902,20 +864,14 @@ static int dell_get_intensity(struct backlight_device *bd)
        if (!token)
                return -ENODEV;
 
-       buffer = dell_smbios_get_buffer();
-       buffer->input[0] = token->location;
-
+       dell_set_arguments(token->location, 0, 0, 0);
        if (power_supply_is_system_supplied() > 0)
-               dell_smbios_send_request(0, 2);
+               ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
        else
-               dell_smbios_send_request(0, 1);
+               ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
 
-       if (buffer->output[0])
-               ret = dell_smbios_error(buffer->output[0]);
-       else
+       if (ret == 0)
                ret = buffer->output[1];
-
-       dell_smbios_release_buffer();
        return ret;
 }
 
@@ -1179,20 +1135,13 @@ static DEFINE_MUTEX(kbd_led_mutex);
 
 static int kbd_get_info(struct kbd_info *info)
 {
-       struct calling_interface_buffer *buffer;
        u8 units;
        int ret;
 
-       buffer = dell_smbios_get_buffer();
-
-       buffer->input[0] = 0x0;
-       dell_smbios_send_request(4, 11);
-       ret = buffer->output[0];
-
-       if (ret) {
-               ret = dell_smbios_error(ret);
-               goto out;
-       }
+       dell_set_arguments(0, 0, 0, 0);
+       ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+       if (ret)
+               return ret;
 
        info->modes = buffer->output[1] & 0xFFFF;
        info->type = (buffer->output[1] >> 24) & 0xFF;
@@ -1209,8 +1158,6 @@ static int kbd_get_info(struct kbd_info *info)
        if (units & BIT(3))
                info->days = (buffer->output[3] >> 24) & 0xFF;
 
- out:
-       dell_smbios_release_buffer();
        return ret;
 }
 
@@ -1269,19 +1216,12 @@ static int kbd_set_level(struct kbd_state *state, u8 level)
 
 static int kbd_get_state(struct kbd_state *state)
 {
-       struct calling_interface_buffer *buffer;
        int ret;
 
-       buffer = dell_smbios_get_buffer();
-
-       buffer->input[0] = 0x1;
-       dell_smbios_send_request(4, 11);
-       ret = buffer->output[0];
-
-       if (ret) {
-               ret = dell_smbios_error(ret);
-               goto out;
-       }
+       dell_set_arguments(0x1, 0, 0, 0);
+       ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+       if (ret)
+               return ret;
 
        state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
        if (state->mode_bit != 0)
@@ -1296,31 +1236,27 @@ static int kbd_get_state(struct kbd_state *state)
        state->timeout_value_ac = (buffer->output[2] >> 24) & 0x3F;
        state->timeout_unit_ac = (buffer->output[2] >> 30) & 0x3;
 
- out:
-       dell_smbios_release_buffer();
        return ret;
 }
 
 static int kbd_set_state(struct kbd_state *state)
 {
-       struct calling_interface_buffer *buffer;
        int ret;
+       u32 input1;
+       u32 input2;
+
+       input1 = BIT(state->mode_bit) & 0xFFFF;
+       input1 |= (state->triggers & 0xFF) << 16;
+       input1 |= (state->timeout_value & 0x3F) << 24;
+       input1 |= (state->timeout_unit & 0x3) << 30;
+       input2 = state->als_setting & 0xFF;
+       input2 |= (state->level & 0xFF) << 16;
+       input2 |= (state->timeout_value_ac & 0x3F) << 24;
+       input2 |= (state->timeout_unit_ac & 0x3) << 30;
+       dell_set_arguments(0x2, input1, input2, 0);
+       ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
 
-       buffer = dell_smbios_get_buffer();
-       buffer->input[0] = 0x2;
-       buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
-       buffer->input[1] |= (state->triggers & 0xFF) << 16;
-       buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
-       buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
-       buffer->input[2] = state->als_setting & 0xFF;
-       buffer->input[2] |= (state->level & 0xFF) << 16;
-       buffer->input[2] |= (state->timeout_value_ac & 0x3F) << 24;
-       buffer->input[2] |= (state->timeout_unit_ac & 0x3) << 30;
-       dell_smbios_send_request(4, 11);
-       ret = buffer->output[0];
-       dell_smbios_release_buffer();
-
-       return dell_smbios_error(ret);
+       return ret;
 }
 
 static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
@@ -1345,7 +1281,6 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
 
 static int kbd_set_token_bit(u8 bit)
 {
-       struct calling_interface_buffer *buffer;
        struct calling_interface_token *token;
        int ret;
 
@@ -1356,19 +1291,14 @@ static int kbd_set_token_bit(u8 bit)
        if (!token)
                return -EINVAL;
 
-       buffer = dell_smbios_get_buffer();
-       buffer->input[0] = token->location;
-       buffer->input[1] = token->value;
-       dell_smbios_send_request(1, 0);
-       ret = buffer->output[0];
-       dell_smbios_release_buffer();
+       dell_set_arguments(token->location, token->value, 0, 0);
+       ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
 
-       return dell_smbios_error(ret);
+       return ret;
 }
 
 static int kbd_get_token_bit(u8 bit)
 {
-       struct calling_interface_buffer *buffer;
        struct calling_interface_token *token;
        int ret;
        int val;
@@ -1380,15 +1310,12 @@ static int kbd_get_token_bit(u8 bit)
        if (!token)
                return -EINVAL;
 
-       buffer = dell_smbios_get_buffer();
-       buffer->input[0] = token->location;
-       dell_smbios_send_request(0, 0);
-       ret = buffer->output[0];
+       dell_set_arguments(token->location, 0, 0, 0);
+       ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_STD);
        val = buffer->output[1];
-       dell_smbios_release_buffer();
 
        if (ret)
-               return dell_smbios_error(ret);
+               return ret;
 
        return (val == token->value);
 }
@@ -2102,7 +2029,6 @@ static struct notifier_block dell_laptop_notifier = {
 
 int dell_micmute_led_set(int state)
 {
-       struct calling_interface_buffer *buffer;
        struct calling_interface_token *token;
 
        if (state == 0)
@@ -2115,11 +2041,8 @@ int dell_micmute_led_set(int state)
        if (!token)
                return -ENODEV;
 
-       buffer = dell_smbios_get_buffer();
-       buffer->input[0] = token->location;
-       buffer->input[1] = token->value;
-       dell_smbios_send_request(1, 0);
-       dell_smbios_release_buffer();
+       dell_set_arguments(token->location, token->value, 0, 0);
+       dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
 
        return state;
 }
@@ -2127,7 +2050,6 @@ EXPORT_SYMBOL_GPL(dell_micmute_led_set);
 
 static int __init dell_init(void)
 {
-       struct calling_interface_buffer *buffer;
        struct calling_interface_token *token;
        int max_intensity = 0;
        int ret;
@@ -2151,6 +2073,13 @@ static int __init dell_init(void)
        if (ret)
                goto fail_platform_device2;
 
+       buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+       if (!buffer) {
+               ret = -ENOMEM;
+               goto fail_buffer;
+       }
+
+
        ret = dell_setup_rfkill();
 
        if (ret) {
@@ -2175,12 +2104,10 @@ static int __init dell_init(void)
 
        token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
        if (token) {
-               buffer = dell_smbios_get_buffer();
-               buffer->input[0] = token->location;
-               dell_smbios_send_request(0, 2);
-               if (buffer->output[0] == 0)
+               dell_set_arguments(token->location, 0, 0, 0);
+               ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
+               if (ret == 0)
                        max_intensity = buffer->output[3];
-               dell_smbios_release_buffer();
        }
 
        if (max_intensity) {
@@ -2214,6 +2141,8 @@ static int __init dell_init(void)
 fail_get_brightness:
        backlight_device_unregister(dell_backlight_device);
 fail_backlight:
+       kfree(buffer);
+fail_buffer:
        dell_cleanup_rfkill();
 fail_rfkill:
        platform_device_del(platform_device);
@@ -2233,6 +2162,7 @@ static void __exit dell_exit(void)
                touchpad_led_exit();
        kbd_led_exit();
        backlight_device_unregister(dell_backlight_device);
+       kfree(buffer);
        dell_cleanup_rfkill();
        if (platform_device) {
                platform_device_unregister(platform_device);
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
new file mode 100644 (file)
index 0000000..89f65c4
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ *  SMI methods for use with dell-smbios
+ *
+ *  Copyright (c) Red Hat <mjg@redhat.com>
+ *  Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ *  Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ *  Copyright (c) 2017 Dell Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include "../../firmware/dcdbas.h"
+#include "dell-smbios.h"
+
+static int da_command_address;
+static int da_command_code;
+static struct calling_interface_buffer *buffer;
+struct platform_device *platform_device;
+static DEFINE_MUTEX(smm_mutex);
+
+static const struct dmi_system_id dell_device_table[] __initconst = {
+       {
+               .ident = "Dell laptop",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /*Notebook*/
+               },
+       },
+       {
+               .ident = "Dell Computer Corporation",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+               },
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(dmi, dell_device_table);
+
+static void __init parse_da_table(const struct dmi_header *dm)
+{
+       struct calling_interface_structure *table =
+               container_of(dm, struct calling_interface_structure, header);
+
+       /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
+        * 6 bytes of entry
+        */
+       if (dm->length < 17)
+               return;
+
+       da_command_address = table->cmdIOAddress;
+       da_command_code = table->cmdIOCode;
+}
+
+static void __init find_cmd_address(const struct dmi_header *dm, void *dummy)
+{
+       switch (dm->type) {
+       case 0xda: /* Calling interface */
+               parse_da_table(dm);
+               break;
+       }
+}
+
+int dell_smbios_smm_call(struct calling_interface_buffer *input)
+{
+       struct smi_cmd command;
+       size_t size;
+
+       size = sizeof(struct calling_interface_buffer);
+       command.magic = SMI_CMD_MAGIC;
+       command.command_address = da_command_address;
+       command.command_code = da_command_code;
+       command.ebx = virt_to_phys(buffer);
+       command.ecx = 0x42534931;
+
+       mutex_lock(&smm_mutex);
+       memcpy(buffer, input, size);
+       dcdbas_smi_request(&command);
+       memcpy(input, buffer, size);
+       mutex_unlock(&smm_mutex);
+       return 0;
+}
+
+/* When enabled this indicates that SMM won't work */
+static bool test_wsmt_enabled(void)
+{
+       struct calling_interface_token *wsmt;
+
+       /* if token doesn't exist, SMM will work */
+       wsmt = dell_smbios_find_token(WSMT_EN_TOKEN);
+       if (!wsmt)
+               return false;
+
+       /* If token exists, try to access over SMM but set a dummy return.
+        * - If WSMT disabled it will be overwritten by SMM
+        * - If WSMT enabled then dummy value will remain
+        */
+       buffer->cmd_class = CLASS_TOKEN_READ;
+       buffer->cmd_select = SELECT_TOKEN_STD;
+       memset(buffer, 0, sizeof(struct calling_interface_buffer));
+       buffer->input[0] = wsmt->location;
+       buffer->output[0] = 99;
+       dell_smbios_smm_call(buffer);
+       if (buffer->output[0] == 99)
+               return true;
+
+       return false;
+}
+
+static int __init dell_smbios_smm_init(void)
+{
+       int ret;
+       /*
+        * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
+        * is passed to SMI handler.
+        */
+       buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+       if (!buffer)
+               return -ENOMEM;
+
+       dmi_walk(find_cmd_address, NULL);
+
+       if (test_wsmt_enabled()) {
+               pr_debug("Disabling due to WSMT enabled\n");
+               ret = -ENODEV;
+               goto fail_wsmt;
+       }
+
+       platform_device = platform_device_alloc("dell-smbios", 1);
+       if (!platform_device) {
+               ret = -ENOMEM;
+               goto fail_platform_device_alloc;
+       }
+
+       ret = platform_device_add(platform_device);
+       if (ret)
+               goto fail_platform_device_add;
+
+       ret = dell_smbios_register_device(&platform_device->dev,
+                                         &dell_smbios_smm_call);
+       if (ret)
+               goto fail_register;
+
+       return 0;
+
+fail_register:
+       platform_device_del(platform_device);
+
+fail_platform_device_add:
+       platform_device_put(platform_device);
+
+fail_wsmt:
+fail_platform_device_alloc:
+       free_page((unsigned long)buffer);
+       return ret;
+}
+
+static void __exit dell_smbios_smm_exit(void)
+{
+       if (platform_device) {
+               dell_smbios_unregister_device(&platform_device->dev);
+               platform_device_unregister(platform_device);
+               free_page((unsigned long)buffer);
+       }
+}
+
+subsys_initcall(dell_smbios_smm_init);
+module_exit(dell_smbios_smm_exit);
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell SMBIOS communications over SMI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
new file mode 100644 (file)
index 0000000..609557a
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ *  WMI methods for use with dell-smbios
+ *
+ *  Copyright (c) 2017 Dell Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/wmi.h>
+#include "dell-smbios.h"
+#include "dell-wmi-descriptor.h"
+
+static DEFINE_MUTEX(call_mutex);
+static DEFINE_MUTEX(list_mutex);
+static int wmi_supported;
+
+struct misc_bios_flags_structure {
+       struct dmi_header header;
+       u16 flags0;
+} __packed;
+#define FLAG_HAS_ACPI_WMI 0x02
+
+#define DELL_WMI_SMBIOS_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
+
+struct wmi_smbios_priv {
+       struct dell_wmi_smbios_buffer *buf;
+       struct list_head list;
+       struct wmi_device *wdev;
+       struct device *child;
+       u32 req_buf_size;
+};
+static LIST_HEAD(wmi_list);
+
+static inline struct wmi_smbios_priv *get_first_smbios_priv(void)
+{
+       return list_first_entry_or_null(&wmi_list,
+                                       struct wmi_smbios_priv,
+                                       list);
+}
+
+static int run_smbios_call(struct wmi_device *wdev)
+{
+       struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+       struct wmi_smbios_priv *priv;
+       struct acpi_buffer input;
+       union acpi_object *obj;
+       acpi_status status;
+
+       priv = dev_get_drvdata(&wdev->dev);
+       input.length = priv->req_buf_size - sizeof(u64);
+       input.pointer = &priv->buf->std;
+
+       dev_dbg(&wdev->dev, "evaluating: %u/%u [%x,%x,%x,%x]\n",
+               priv->buf->std.cmd_class, priv->buf->std.cmd_select,
+               priv->buf->std.input[0], priv->buf->std.input[1],
+               priv->buf->std.input[2], priv->buf->std.input[3]);
+
+       status = wmidev_evaluate_method(wdev, 0, 1, &input, &output);
+       if (ACPI_FAILURE(status))
+               return -EIO;
+       obj = (union acpi_object *)output.pointer;
+       if (obj->type != ACPI_TYPE_BUFFER) {
+               dev_dbg(&wdev->dev, "received type: %d\n", obj->type);
+               if (obj->type == ACPI_TYPE_INTEGER)
+                       dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
+                               obj->integer.value);
+               return -EIO;
+       }
+       memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
+       dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
+               priv->buf->std.output[0], priv->buf->std.output[1],
+               priv->buf->std.output[2], priv->buf->std.output[3]);
+
+       return 0;
+}
+
+int dell_smbios_wmi_call(struct calling_interface_buffer *buffer)
+{
+       struct wmi_smbios_priv *priv;
+       size_t difference;
+       size_t size;
+       int ret;
+
+       mutex_lock(&call_mutex);
+       priv = get_first_smbios_priv();
+       if (!priv) {
+               ret = -ENODEV;
+               goto out_wmi_call;
+       }
+
+       size = sizeof(struct calling_interface_buffer);
+       difference = priv->req_buf_size - sizeof(u64) - size;
+
+       memset(&priv->buf->ext, 0, difference);
+       memcpy(&priv->buf->std, buffer, size);
+       ret = run_smbios_call(priv->wdev);
+       memcpy(buffer, &priv->buf->std, size);
+out_wmi_call:
+       mutex_unlock(&call_mutex);
+
+       return ret;
+}
+
+static long dell_smbios_wmi_filter(struct wmi_device *wdev, unsigned int cmd,
+                                  struct wmi_ioctl_buffer *arg)
+{
+       struct wmi_smbios_priv *priv;
+       int ret = 0;
+
+       switch (cmd) {
+       case DELL_WMI_SMBIOS_CMD:
+               mutex_lock(&call_mutex);
+               priv = dev_get_drvdata(&wdev->dev);
+               if (!priv) {
+                       ret = -ENODEV;
+                       goto fail_smbios_cmd;
+               }
+               memcpy(priv->buf, arg, priv->req_buf_size);
+               if (dell_smbios_call_filter(&wdev->dev, &priv->buf->std)) {
+                       dev_err(&wdev->dev, "Invalid call %d/%d:%8x\n",
+                               priv->buf->std.cmd_class,
+                               priv->buf->std.cmd_select,
+                               priv->buf->std.input[0]);
+                       ret = -EFAULT;
+                       goto fail_smbios_cmd;
+               }
+               ret = run_smbios_call(priv->wdev);
+               if (ret)
+                       goto fail_smbios_cmd;
+               memcpy(arg, priv->buf, priv->req_buf_size);
+fail_smbios_cmd:
+               mutex_unlock(&call_mutex);
+               break;
+       default:
+               ret = -ENOIOCTLCMD;
+       }
+       return ret;
+}
+
+static int dell_smbios_wmi_probe(struct wmi_device *wdev)
+{
+       struct wmi_driver *wdriver =
+               container_of(wdev->dev.driver, struct wmi_driver, driver);
+       struct wmi_smbios_priv *priv;
+       u32 hotfix;
+       int count;
+       int ret;
+
+       ret = dell_wmi_get_descriptor_valid();
+       if (ret)
+               return ret;
+
+       priv = devm_kzalloc(&wdev->dev, sizeof(struct wmi_smbios_priv),
+                           GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       /* WMI buffer size will be either 4k or 32k depending on machine */
+       if (!dell_wmi_get_size(&priv->req_buf_size))
+               return -EPROBE_DEFER;
+
+       /* some SMBIOS calls fail unless BIOS contains hotfix */
+       if (!dell_wmi_get_hotfix(&hotfix))
+               return -EPROBE_DEFER;
+       if (!hotfix) {
+               dev_warn(&wdev->dev,
+                       "WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n",
+                       hotfix);
+               wdriver->filter_callback = NULL;
+       }
+
+       /* add in the length object we will use internally with ioctl */
+       priv->req_buf_size += sizeof(u64);
+       ret = set_required_buffer_size(wdev, priv->req_buf_size);
+       if (ret)
+               return ret;
+
+       count = get_order(priv->req_buf_size);
+       priv->buf = (void *)__get_free_pages(GFP_KERNEL, count);
+       if (!priv->buf)
+               return -ENOMEM;
+
+       /* ID is used by dell-smbios to set priority of drivers */
+       wdev->dev.id = 1;
+       ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call);
+       if (ret)
+               goto fail_register;
+
+       priv->wdev = wdev;
+       dev_set_drvdata(&wdev->dev, priv);
+       mutex_lock(&list_mutex);
+       list_add_tail(&priv->list, &wmi_list);
+       mutex_unlock(&list_mutex);
+
+       return 0;
+
+fail_register:
+       free_pages((unsigned long)priv->buf, count);
+       return ret;
+}
+
+static int dell_smbios_wmi_remove(struct wmi_device *wdev)
+{
+       struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);
+       int count;
+
+       mutex_lock(&call_mutex);
+       mutex_lock(&list_mutex);
+       list_del(&priv->list);
+       mutex_unlock(&list_mutex);
+       dell_smbios_unregister_device(&wdev->dev);
+       count = get_order(priv->req_buf_size);
+       free_pages((unsigned long)priv->buf, count);
+       mutex_unlock(&call_mutex);
+       return 0;
+}
+
+static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
+       { .guid_string = DELL_WMI_SMBIOS_GUID },
+       { },
+};
+
+static void __init parse_b1_table(const struct dmi_header *dm)
+{
+       struct misc_bios_flags_structure *flags =
+       container_of(dm, struct misc_bios_flags_structure, header);
+
+       /* 4 bytes header, 8 bytes flags */
+       if (dm->length < 12)
+               return;
+       if (dm->handle != 0xb100)
+               return;
+       if ((flags->flags0 & FLAG_HAS_ACPI_WMI))
+               wmi_supported = 1;
+}
+
+static void __init find_b1(const struct dmi_header *dm, void *dummy)
+{
+       switch (dm->type) {
+       case 0xb1: /* misc bios flags */
+               parse_b1_table(dm);
+               break;
+       }
+}
+
+static struct wmi_driver dell_smbios_wmi_driver = {
+       .driver = {
+               .name = "dell-smbios",
+       },
+       .probe = dell_smbios_wmi_probe,
+       .remove = dell_smbios_wmi_remove,
+       .id_table = dell_smbios_wmi_id_table,
+       .filter_callback = dell_smbios_wmi_filter,
+};
+
+static int __init init_dell_smbios_wmi(void)
+{
+       dmi_walk(find_b1, NULL);
+
+       if (!wmi_supported)
+               return -ENODEV;
+
+       return wmi_driver_register(&dell_smbios_wmi_driver);
+}
+
+static void __exit exit_dell_smbios_wmi(void)
+{
+       wmi_driver_unregister(&dell_smbios_wmi_driver);
+}
+
+module_init(init_dell_smbios_wmi);
+module_exit(exit_dell_smbios_wmi);
+
+MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell SMBIOS communications over WMI");
+MODULE_LICENSE("GPL");
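Taken together with the filter above, userspace drives this interface by filling a struct dell_wmi_smbios_buffer and issuing DELL_WMI_SMBIOS_CMD on the character device the WMI bus creates for this driver. The sketch below is a guess at typical usage: it assumes the node is /dev/wmi/dell-smbios, that the structure and ioctl come from <linux/wmi.h> with a leading __u64 length member, and that a 4k-buffer machine is targeted; the (0, 0) class/select pair corresponds to the standard token read that the old dell_smbios_send_request(0, 0) calls performed, and the token location is a placeholder that would normally be read from the dell-smbios tokens sysfs files:

/* Hypothetical userspace caller; the path, buffer size and token location are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/wmi.h>

int main(void)
{
	size_t len = 4096 + sizeof(__u64);	/* assumes a 4k-buffer machine */
	struct dell_wmi_smbios_buffer *buf = calloc(1, len);
	int fd = open("/dev/wmi/dell-smbios", O_RDWR);

	if (!buf || fd < 0)
		return 1;
	buf->length = len;
	buf->std.cmd_class = 0;			/* CLASS_TOKEN_READ */
	buf->std.cmd_select = 0;		/* SELECT_TOKEN_STD */
	buf->std.input[0] = 0x004e;		/* placeholder token location */
	if (ioctl(fd, DELL_WMI_SMBIOS_CMD, buf) < 0)
		perror("DELL_WMI_SMBIOS_CMD");
	else
		printf("output: %u %u\n", buf->std.output[0], buf->std.output[1]);
	close(fd);
	free(buf);
	return 0;
}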
index 0a5723468bfffdd6f16df829e5131c90d7e38034..6a60db515bdabcb490410b35ea85bb7a3fd621f1 100644 (file)
  *  it under the terms of the GNU General Public License version 2 as
  *  published by the Free Software Foundation.
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/capability.h>
 #include <linux/dmi.h>
 #include <linux/err.h>
-#include <linux/gfp.h>
 #include <linux/mutex.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/io.h>
-#include "../../firmware/dcdbas.h"
 #include "dell-smbios.h"
 
-struct calling_interface_structure {
-       struct dmi_header header;
-       u16 cmdIOAddress;
-       u8 cmdIOCode;
-       u32 supportedCmds;
-       struct calling_interface_token tokens[];
-} __packed;
-
-static struct calling_interface_buffer *buffer;
-static DEFINE_MUTEX(buffer_mutex);
-
-static int da_command_address;
-static int da_command_code;
+static u32 da_supported_commands;
 static int da_num_tokens;
+static struct platform_device *platform_device;
 static struct calling_interface_token *da_tokens;
+static struct device_attribute *token_location_attrs;
+static struct device_attribute *token_value_attrs;
+static struct attribute **token_attrs;
+static DEFINE_MUTEX(smbios_mutex);
+
+struct smbios_device {
+       struct list_head list;
+       struct device *device;
+       int (*call_fn)(struct calling_interface_buffer *);
+};
+
+struct smbios_call {
+       u32 need_capability;
+       int cmd_class;
+       int cmd_select;
+};
+
+/* calls that are whitelisted for given capabilities */
+static struct smbios_call call_whitelist[] = {
+       /* generally tokens are allowed, but may be further filtered or
+        * restricted by token blacklist or whitelist
+        */
+       {CAP_SYS_ADMIN, CLASS_TOKEN_READ,       SELECT_TOKEN_STD},
+       {CAP_SYS_ADMIN, CLASS_TOKEN_READ,       SELECT_TOKEN_AC},
+       {CAP_SYS_ADMIN, CLASS_TOKEN_READ,       SELECT_TOKEN_BAT},
+       {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE,      SELECT_TOKEN_STD},
+       {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE,      SELECT_TOKEN_AC},
+       {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE,      SELECT_TOKEN_BAT},
+       /* used by userspace: fwupdate */
+       {CAP_SYS_ADMIN, CLASS_ADMIN_PROP,       SELECT_ADMIN_PROP},
+       /* used by userspace: fwupd */
+       {CAP_SYS_ADMIN, CLASS_INFO,             SELECT_DOCK},
+       {CAP_SYS_ADMIN, CLASS_FLASH_INTERFACE,  SELECT_FLASH_INTERFACE},
+};
+
+/* calls that are explicitly blacklisted */
+static struct smbios_call call_blacklist[] = {
+       {0x0000, 01, 07}, /* manufacturing use */
+       {0x0000, 06, 05}, /* manufacturing use */
+       {0x0000, 11, 03}, /* write once */
+       {0x0000, 11, 07}, /* write once */
+       {0x0000, 11, 11}, /* write once */
+       {0x0000, 19, -1}, /* diagnostics */
+       /* handled by kernel: dell-laptop */
+       {0x0000, CLASS_INFO, SELECT_RFKILL},
+       {0x0000, CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT},
+};
+
+struct token_range {
+       u32 need_capability;
+       u16 min;
+       u16 max;
+};
+
+/* tokens that are whitelisted for given capabilities */
+static struct token_range token_whitelist[] = {
+       /* used by userspace: fwupdate */
+       {CAP_SYS_ADMIN, CAPSULE_EN_TOKEN,       CAPSULE_DIS_TOKEN},
+       /* can indicate to userspace that WMI is needed */
+       {0x0000,        WSMT_EN_TOKEN,          WSMT_DIS_TOKEN}
+};
+
+/* tokens that are explicitly blacklisted */
+static struct token_range token_blacklist[] = {
+       {0x0000, 0x0058, 0x0059}, /* ME use */
+       {0x0000, 0x00CD, 0x00D0}, /* raid shadow copy */
+       {0x0000, 0x013A, 0x01FF}, /* sata shadow copy */
+       {0x0000, 0x0175, 0x0176}, /* write once */
+       {0x0000, 0x0195, 0x0197}, /* diagnostics */
+       {0x0000, 0x01DC, 0x01DD}, /* manufacturing use */
+       {0x0000, 0x027D, 0x0284}, /* diagnostics */
+       {0x0000, 0x02E3, 0x02E3}, /* manufacturing use */
+       {0x0000, 0x02FF, 0x02FF}, /* manufacturing use */
+       {0x0000, 0x0300, 0x0302}, /* manufacturing use */
+       {0x0000, 0x0325, 0x0326}, /* manufacturing use */
+       {0x0000, 0x0332, 0x0335}, /* fan control */
+       {0x0000, 0x0350, 0x0350}, /* manufacturing use */
+       {0x0000, 0x0363, 0x0363}, /* manufacturing use */
+       {0x0000, 0x0368, 0x0368}, /* manufacturing use */
+       {0x0000, 0x03F6, 0x03F7}, /* manufacturing use */
+       {0x0000, 0x049E, 0x049F}, /* manufacturing use */
+       {0x0000, 0x04A0, 0x04A3}, /* diagnostics */
+       {0x0000, 0x04E6, 0x04E7}, /* manufacturing use */
+       {0x0000, 0x4000, 0x7FFF}, /* internal BIOS use */
+       {0x0000, 0x9000, 0x9001}, /* internal BIOS use */
+       {0x0000, 0xA000, 0xBFFF}, /* write only */
+       {0x0000, 0xEFF0, 0xEFFF}, /* internal BIOS use */
+       /* handled by kernel: dell-laptop */
+       {0x0000, BRIGHTNESS_TOKEN,      BRIGHTNESS_TOKEN},
+       {0x0000, KBD_LED_OFF_TOKEN,     KBD_LED_AUTO_TOKEN},
+       {0x0000, KBD_LED_AC_TOKEN,      KBD_LED_AC_TOKEN},
+       {0x0000, KBD_LED_AUTO_25_TOKEN, KBD_LED_AUTO_75_TOKEN},
+       {0x0000, KBD_LED_AUTO_100_TOKEN,        KBD_LED_AUTO_100_TOKEN},
+       {0x0000, GLOBAL_MIC_MUTE_ENABLE,        GLOBAL_MIC_MUTE_DISABLE},
+};
+
+static LIST_HEAD(smbios_device_list);
 
 int dell_smbios_error(int value)
 {
@@ -55,42 +141,175 @@ int dell_smbios_error(int value)
 }
 EXPORT_SYMBOL_GPL(dell_smbios_error);
 
-struct calling_interface_buffer *dell_smbios_get_buffer(void)
+int dell_smbios_register_device(struct device *d, void *call_fn)
 {
-       mutex_lock(&buffer_mutex);
-       dell_smbios_clear_buffer();
-       return buffer;
+       struct smbios_device *priv;
+
+       priv = devm_kzalloc(d, sizeof(struct smbios_device), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+       get_device(d);
+       priv->device = d;
+       priv->call_fn = call_fn;
+       mutex_lock(&smbios_mutex);
+       list_add_tail(&priv->list, &smbios_device_list);
+       mutex_unlock(&smbios_mutex);
+       dev_dbg(d, "Added device: %s\n", d->driver->name);
+       return 0;
 }
-EXPORT_SYMBOL_GPL(dell_smbios_get_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_register_device);
 
-void dell_smbios_clear_buffer(void)
+void dell_smbios_unregister_device(struct device *d)
 {
-       memset(buffer, 0, sizeof(struct calling_interface_buffer));
+       struct smbios_device *priv;
+
+       mutex_lock(&smbios_mutex);
+       list_for_each_entry(priv, &smbios_device_list, list) {
+               if (priv->device == d) {
+                       list_del(&priv->list);
+                       put_device(d);
+                       break;
+               }
+       }
+       mutex_unlock(&smbios_mutex);
+       dev_dbg(d, "Remove device: %s\n", d->driver->name);
 }
-EXPORT_SYMBOL_GPL(dell_smbios_clear_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_unregister_device);
 
-void dell_smbios_release_buffer(void)
+int dell_smbios_call_filter(struct device *d,
+                           struct calling_interface_buffer *buffer)
 {
-       mutex_unlock(&buffer_mutex);
+       u16 t = 0;
+       int i;
+
+       /* can't make calls over 30 */
+       if (buffer->cmd_class > 30) {
+               dev_dbg(d, "class too big: %u\n", buffer->cmd_class);
+               return -EINVAL;
+       }
+
+       /* supported calls on the particular system */
+       if (!(da_supported_commands & (1 << buffer->cmd_class))) {
+               dev_dbg(d, "invalid command, supported commands: 0x%8x\n",
+                       da_supported_commands);
+               return -EINVAL;
+       }
+
+       /* match against call blacklist  */
+       for (i = 0; i < ARRAY_SIZE(call_blacklist); i++) {
+               if (buffer->cmd_class != call_blacklist[i].cmd_class)
+                       continue;
+               if (buffer->cmd_select != call_blacklist[i].cmd_select &&
+                   call_blacklist[i].cmd_select != -1)
+                       continue;
+               dev_dbg(d, "blacklisted command: %u/%u\n",
+                       buffer->cmd_class, buffer->cmd_select);
+               return -EINVAL;
+       }
+
+       /* if a token call, find token ID */
+
+       if ((buffer->cmd_class == CLASS_TOKEN_READ ||
+            buffer->cmd_class == CLASS_TOKEN_WRITE) &&
+            buffer->cmd_select < 3) {
+               /* find the matching token ID */
+               for (i = 0; i < da_num_tokens; i++) {
+                       if (da_tokens[i].location != buffer->input[0])
+                               continue;
+                       t = da_tokens[i].tokenID;
+                       break;
+               }
+
+               /* token call; but token didn't exist */
+               if (!t) {
+                       dev_dbg(d, "token at location %04x doesn't exist\n",
+                               buffer->input[0]);
+                       return -EINVAL;
+               }
+
+               /* match against token blacklist */
+               for (i = 0; i < ARRAY_SIZE(token_blacklist); i++) {
+                       if (!token_blacklist[i].min || !token_blacklist[i].max)
+                               continue;
+                       if (t >= token_blacklist[i].min &&
+                           t <= token_blacklist[i].max)
+                               return -EINVAL;
+               }
+
+               /* match against token whitelist */
+               for (i = 0; i < ARRAY_SIZE(token_whitelist); i++) {
+                       if (!token_whitelist[i].min || !token_whitelist[i].max)
+                               continue;
+                       if (t < token_whitelist[i].min ||
+                           t > token_whitelist[i].max)
+                               continue;
+                       if (!token_whitelist[i].need_capability ||
+                           capable(token_whitelist[i].need_capability)) {
+                               dev_dbg(d, "whitelisted token: %x\n", t);
+                               return 0;
+                       }
+
+               }
+       }
+       /* match against call whitelist */
+       for (i = 0; i < ARRAY_SIZE(call_whitelist); i++) {
+               if (buffer->cmd_class != call_whitelist[i].cmd_class)
+                       continue;
+               if (buffer->cmd_select != call_whitelist[i].cmd_select)
+                       continue;
+               if (!call_whitelist[i].need_capability ||
+                   capable(call_whitelist[i].need_capability)) {
+                       dev_dbg(d, "whitelisted capable command: %u/%u\n",
+                       buffer->cmd_class, buffer->cmd_select);
+                       return 0;
+               }
+               dev_dbg(d, "missing capability %d for %u/%u\n",
+                       call_whitelist[i].need_capability,
+                       buffer->cmd_class, buffer->cmd_select);
+
+       }
+
+       /* not in a whitelist, only allow processes with capabilities */
+       if (capable(CAP_SYS_RAWIO)) {
+               dev_dbg(d, "Allowing %u/%u due to CAP_SYS_RAWIO\n",
+                       buffer->cmd_class, buffer->cmd_select);
+               return 0;
+       }
+
+       return -EACCES;
 }
-EXPORT_SYMBOL_GPL(dell_smbios_release_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_call_filter);
 
-void dell_smbios_send_request(int class, int select)
+int dell_smbios_call(struct calling_interface_buffer *buffer)
 {
-       struct smi_cmd command;
+       int (*call_fn)(struct calling_interface_buffer *) = NULL;
+       struct device *selected_dev = NULL;
+       struct smbios_device *priv;
+       int ret;
 
-       command.magic = SMI_CMD_MAGIC;
-       command.command_address = da_command_address;
-       command.command_code = da_command_code;
-       command.ebx = virt_to_phys(buffer);
-       command.ecx = 0x42534931;
+       mutex_lock(&smbios_mutex);
+       list_for_each_entry(priv, &smbios_device_list, list) {
+               if (!selected_dev || priv->device->id >= selected_dev->id) {
+                       dev_dbg(priv->device, "Trying device ID: %d\n",
+                               priv->device->id);
+                       call_fn = priv->call_fn;
+                       selected_dev = priv->device;
+               }
+       }
+
+       if (!selected_dev) {
+               ret = -ENODEV;
+               pr_err("No dell-smbios drivers are loaded\n");
+               goto out_smbios_call;
+       }
 
-       buffer->class = class;
-       buffer->select = select;
+       ret = call_fn(buffer);
 
-       dcdbas_smi_request(&command);
+out_smbios_call:
+       mutex_unlock(&smbios_mutex);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(dell_smbios_send_request);
+EXPORT_SYMBOL_GPL(dell_smbios_call);
 
 struct calling_interface_token *dell_smbios_find_token(int tokenid)
 {
@@ -139,8 +358,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
        if (dm->length < 17)
                return;
 
-       da_command_address = table->cmdIOAddress;
-       da_command_code = table->cmdIOCode;
+       da_supported_commands = table->supportedCmds;
 
        new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
                                 sizeof(struct calling_interface_token),
@@ -156,6 +374,27 @@ static void __init parse_da_table(const struct dmi_header *dm)
        da_num_tokens += tokens;
 }
 
+static void zero_duplicates(struct device *dev)
+{
+       int i, j;
+
+       for (i = 0; i < da_num_tokens; i++) {
+               if (da_tokens[i].tokenID == 0)
+                       continue;
+               for (j = i+1; j < da_num_tokens; j++) {
+                       if (da_tokens[j].tokenID == 0)
+                               continue;
+                       if (da_tokens[i].tokenID == da_tokens[j].tokenID) {
+                               dev_dbg(dev, "Zeroing dup token ID %x(%x/%x)\n",
+                                       da_tokens[j].tokenID,
+                                       da_tokens[j].location,
+                                       da_tokens[j].value);
+                               da_tokens[j].tokenID = 0;
+                       }
+               }
+       }
+}
+
 static void __init find_tokens(const struct dmi_header *dm, void *dummy)
 {
        switch (dm->type) {
@@ -169,10 +408,160 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
        }
 }
 
+static int match_attribute(struct device *dev,
+                          struct device_attribute *attr)
+{
+       int i;
+
+       for (i = 0; i < da_num_tokens * 2; i++) {
+               if (!token_attrs[i])
+                       continue;
+               if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
+                       return i/2;
+       }
+       dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
+       return -EINVAL;
+}
+
+static ssize_t location_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       int i;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       i = match_attribute(dev, attr);
+       if (i > 0)
+               return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].location);
+       return 0;
+}
+
+static ssize_t value_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       int i;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       i = match_attribute(dev, attr);
+       if (i > 0)
+               return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].value);
+       return 0;
+}
+
+static struct attribute_group smbios_attribute_group = {
+       .name = "tokens"
+};
+
+static struct platform_driver platform_driver = {
+       .driver = {
+               .name = "dell-smbios",
+       },
+};
+
+static int build_tokens_sysfs(struct platform_device *dev)
+{
+       char *location_name;
+       char *value_name;
+       size_t size;
+       int ret;
+       int i, j;
+
+       /* number of tokens + 1 for the NULL terminator */
+       size = sizeof(struct device_attribute) * (da_num_tokens + 1);
+       token_location_attrs = kzalloc(size, GFP_KERNEL);
+       if (!token_location_attrs)
+               return -ENOMEM;
+       token_value_attrs = kzalloc(size, GFP_KERNEL);
+       if (!token_value_attrs)
+               goto out_allocate_value;
+
+       /* need to store both location and value + terminator */
+       size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
+       token_attrs = kzalloc(size, GFP_KERNEL);
+       if (!token_attrs)
+               goto out_allocate_attrs;
+
+       for (i = 0, j = 0; i < da_num_tokens; i++) {
+               /* skip empty */
+               if (da_tokens[i].tokenID == 0)
+                       continue;
+               /* add location */
+               location_name = kasprintf(GFP_KERNEL, "%04x_location",
+                                         da_tokens[i].tokenID);
+               if (location_name == NULL)
+                       goto out_unwind_strings;
+               sysfs_attr_init(&token_location_attrs[i].attr);
+               token_location_attrs[i].attr.name = location_name;
+               token_location_attrs[i].attr.mode = 0444;
+               token_location_attrs[i].show = location_show;
+               token_attrs[j++] = &token_location_attrs[i].attr;
+
+               /* add value */
+               value_name = kasprintf(GFP_KERNEL, "%04x_value",
+                                      da_tokens[i].tokenID);
+               if (value_name == NULL)
+                       goto loop_fail_create_value;
+               sysfs_attr_init(&token_value_attrs[i].attr);
+               token_value_attrs[i].attr.name = value_name;
+               token_value_attrs[i].attr.mode = 0444;
+               token_value_attrs[i].show = value_show;
+               token_attrs[j++] = &token_value_attrs[i].attr;
+               continue;
+
+loop_fail_create_value:
+               kfree(value_name);
+               goto out_unwind_strings;
+       }
+       smbios_attribute_group.attrs = token_attrs;
+
+       ret = sysfs_create_group(&dev->dev.kobj, &smbios_attribute_group);
+       if (ret)
+               goto out_unwind_strings;
+       return 0;
+
+out_unwind_strings:
+       for (i = i-1; i > 0; i--) {
+               kfree(token_location_attrs[i].attr.name);
+               kfree(token_value_attrs[i].attr.name);
+       }
+       kfree(token_attrs);
+out_allocate_attrs:
+       kfree(token_value_attrs);
+out_allocate_value:
+       kfree(token_location_attrs);
+
+       return -ENOMEM;
+}
+
+static void free_group(struct platform_device *pdev)
+{
+       int i;
+
+       sysfs_remove_group(&pdev->dev.kobj,
+                               &smbios_attribute_group);
+       for (i = 0; i < da_num_tokens; i++) {
+               kfree(token_location_attrs[i].attr.name);
+               kfree(token_value_attrs[i].attr.name);
+       }
+       kfree(token_attrs);
+       kfree(token_value_attrs);
+       kfree(token_location_attrs);
+}
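Each surviving token therefore ends up as a pair of 0444 files, <tokenID>_location and <tokenID>_value, in a "tokens" group on the dell-smbios platform device, with reads gated by CAP_SYS_ADMIN in the show functions. A hypothetical read, assuming the base device is registered as dell-smbios.0 and using a made-up token ID of 0x0480:

/* Hypothetical read of one generated token attribute; token ID 0x0480 is made up. */
#include <stdio.h>

int main(void)
{
	char val[16];
	FILE *f = fopen("/sys/devices/platform/dell-smbios.0/tokens/0480_location", "r");

	if (!f)
		return 1;
	if (fgets(val, sizeof(val), f))
		printf("location: %s\n", val);	/* formatted as %08x by location_show() */
	fclose(f);
	return 0;
}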
+
 static int __init dell_smbios_init(void)
 {
+       const struct dmi_device *valid;
        int ret;
 
+       valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL);
+       if (!valid) {
+               pr_err("Unable to run on non-Dell system\n");
+               return -ENODEV;
+       }
+
        dmi_walk(find_tokens, NULL);
 
        if (!da_tokens)  {
@@ -180,27 +569,52 @@ static int __init dell_smbios_init(void)
                return -ENODEV;
        }
 
-       /*
-        * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
-        * is passed to SMI handler.
-        */
-       buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
-       if (!buffer) {
+       ret = platform_driver_register(&platform_driver);
+       if (ret)
+               goto fail_platform_driver;
+
+       platform_device = platform_device_alloc("dell-smbios", 0);
+       if (!platform_device) {
                ret = -ENOMEM;
-               goto fail_buffer;
+               goto fail_platform_device_alloc;
        }
+       ret = platform_device_add(platform_device);
+       if (ret)
+               goto fail_platform_device_add;
+
+       /* duplicate tokens will cause problems building sysfs files */
+       zero_duplicates(&platform_device->dev);
+
+       ret = build_tokens_sysfs(platform_device);
+       if (ret)
+               goto fail_create_group;
 
        return 0;
 
-fail_buffer:
+fail_create_group:
+       platform_device_del(platform_device);
+
+fail_platform_device_add:
+       platform_device_put(platform_device);
+
+fail_platform_device_alloc:
+       platform_driver_unregister(&platform_driver);
+
+fail_platform_driver:
        kfree(da_tokens);
        return ret;
 }
 
 static void __exit dell_smbios_exit(void)
 {
+       mutex_lock(&smbios_mutex);
+       if (platform_device) {
+               free_group(platform_device);
+               platform_device_unregister(platform_device);
+               platform_driver_unregister(&platform_driver);
+       }
        kfree(da_tokens);
-       free_page((unsigned long)buffer);
+       mutex_unlock(&smbios_mutex);
 }
 
 subsys_initcall(dell_smbios_init);
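The build_tokens_sysfs() hunk above publishes every non-empty DA token as a pair of read-only files named "%04x_location" and "%04x_value" under the dell-smbios platform device. A minimal user-space sketch of reading one token follows; the sysfs directory and the token ID are assumptions for illustration, not something taken from the patch.

/* Hedged sketch: read one token pair exposed by build_tokens_sysfs().
 * The directory and the token ID below are assumptions. */
#include <stdio.h>

static void read_attr(const char *dir, unsigned int token, const char *kind)
{
    char path[256], buf[64];
    FILE *f;

    /* attribute names follow the "%04x_location" / "%04x_value" pattern */
    snprintf(path, sizeof(path), "%s/%04x_%s", dir, token, kind);
    f = fopen(path, "r");
    if (!f) {
        perror(path);
        return;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("%s: %s", path, buf);
    fclose(f);
}

int main(void)
{
    /* assumed path for the "dell-smbios" platform device registered above */
    const char *dir = "/sys/devices/platform/dell-smbios.0";

    read_attr(dir, 0x0123, "location");    /* 0x0123 is a hypothetical token */
    read_attr(dir, 0x0123, "value");
    return 0;
}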
index 45cbc2292cd3d703fae5763d05641c1d3e7ec666..138d478d9adc91bdceee7194567cbe40718ec1f7 100644 (file)
 #ifndef _DELL_SMBIOS_H_
 #define _DELL_SMBIOS_H_
 
-struct notifier_block;
+#include <linux/device.h>
+#include <uapi/linux/wmi.h>
 
-/* This structure will be modified by the firmware when we enter
- * system management mode, hence the volatiles */
+/* Classes and selects used only in kernel drivers */
+#define CLASS_KBD_BACKLIGHT 4
+#define SELECT_KBD_BACKLIGHT 11
 
-struct calling_interface_buffer {
-       u16 class;
-       u16 select;
-       volatile u32 input[4];
-       volatile u32 output[4];
-} __packed;
+/* Tokens used in kernel drivers, any of these
+ * should be filtered from userspace access
+ */
+#define BRIGHTNESS_TOKEN       0x007d
+#define KBD_LED_AC_TOKEN       0x0451
+#define KBD_LED_OFF_TOKEN      0x01E1
+#define KBD_LED_ON_TOKEN       0x01E2
+#define KBD_LED_AUTO_TOKEN     0x01E3
+#define KBD_LED_AUTO_25_TOKEN  0x02EA
+#define KBD_LED_AUTO_50_TOKEN  0x02EB
+#define KBD_LED_AUTO_75_TOKEN  0x02EC
+#define KBD_LED_AUTO_100_TOKEN 0x02F6
+#define GLOBAL_MIC_MUTE_ENABLE 0x0364
+#define GLOBAL_MIC_MUTE_DISABLE        0x0365
+
+struct notifier_block;
 
 struct calling_interface_token {
        u16 tokenID;
@@ -37,12 +49,21 @@ struct calling_interface_token {
        };
 };
 
-int dell_smbios_error(int value);
+struct calling_interface_structure {
+       struct dmi_header header;
+       u16 cmdIOAddress;
+       u8 cmdIOCode;
+       u32 supportedCmds;
+       struct calling_interface_token tokens[];
+} __packed;
 
-struct calling_interface_buffer *dell_smbios_get_buffer(void);
-void dell_smbios_clear_buffer(void);
-void dell_smbios_release_buffer(void);
-void dell_smbios_send_request(int class, int select);
+int dell_smbios_register_device(struct device *d, void *call_fn);
+void dell_smbios_unregister_device(struct device *d);
+
+int dell_smbios_error(int value);
+int dell_smbios_call_filter(struct device *d,
+       struct calling_interface_buffer *buffer);
+int dell_smbios_call(struct calling_interface_buffer *buffer);
 
 struct calling_interface_token *dell_smbios_find_token(int tokenid);
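The token #defines added above exist so that requests arriving from user space can be screened against values the kernel drivers own. Purely as an illustration of that screening idea (this is not the actual dell_smbios_call_filter() implementation, which is outside this hunk), a reserved-token check could look like:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: token IDs copied from the #defines above. */
static const uint16_t kernel_reserved_tokens[] = {
    0x007d, /* BRIGHTNESS_TOKEN */
    0x01E1, /* KBD_LED_OFF_TOKEN */
    0x01E2, /* KBD_LED_ON_TOKEN */
    0x0364, /* GLOBAL_MIC_MUTE_ENABLE */
    0x0365, /* GLOBAL_MIC_MUTE_DISABLE */
};

static bool token_is_kernel_reserved(uint16_t token)
{
    size_t i;

    for (i = 0; i < sizeof(kernel_reserved_tokens) /
            sizeof(kernel_reserved_tokens[0]); i++)
        if (kernel_reserved_tokens[i] == token)
            return true;
    return false;
}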
 
index 37e646034ef8c07dbeec5317b64433be6aa1bf49..1d87237bc731b05c4d4706b560fdc45dc407a241 100644 (file)
@@ -90,7 +90,7 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
                                         struct smo8800_device, miscdev);
 
        u32 data = 0;
-       unsigned char byte_data = 0;
+       unsigned char byte_data;
        ssize_t retval = 1;
 
        if (count < 1)
@@ -103,7 +103,6 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
        if (retval)
                return retval;
 
-       byte_data = 1;
        retval = 1;
 
        if (data < 255)
diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell-wmi-descriptor.c
new file mode 100644 (file)
index 0000000..072821a
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * Dell WMI descriptor driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+#include "dell-wmi-descriptor.h"
+
+#define DELL_WMI_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
+
+struct descriptor_priv {
+       struct list_head list;
+       u32 interface_version;
+       u32 size;
+       u32 hotfix;
+};
+static int descriptor_valid = -EPROBE_DEFER;
+static LIST_HEAD(wmi_list);
+static DEFINE_MUTEX(list_mutex);
+
+int dell_wmi_get_descriptor_valid(void)
+{
+       if (!wmi_has_guid(DELL_WMI_DESCRIPTOR_GUID))
+               return -ENODEV;
+
+       return descriptor_valid;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_descriptor_valid);
+
+bool dell_wmi_get_interface_version(u32 *version)
+{
+       struct descriptor_priv *priv;
+       bool ret = false;
+
+       mutex_lock(&list_mutex);
+       priv = list_first_entry_or_null(&wmi_list,
+                                       struct descriptor_priv,
+                                       list);
+       if (priv) {
+               *version = priv->interface_version;
+               ret = true;
+       }
+       mutex_unlock(&list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_interface_version);
+
+bool dell_wmi_get_size(u32 *size)
+{
+       struct descriptor_priv *priv;
+       bool ret = false;
+
+       mutex_lock(&list_mutex);
+       priv = list_first_entry_or_null(&wmi_list,
+                                       struct descriptor_priv,
+                                       list);
+       if (priv) {
+               *size = priv->size;
+               ret = true;
+       }
+       mutex_unlock(&list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_size);
+
+bool dell_wmi_get_hotfix(u32 *hotfix)
+{
+       struct descriptor_priv *priv;
+       bool ret = false;
+
+       mutex_lock(&list_mutex);
+       priv = list_first_entry_or_null(&wmi_list,
+                                       struct descriptor_priv,
+                                       list);
+       if (priv) {
+               *hotfix = priv->hotfix;
+               ret = true;
+       }
+       mutex_unlock(&list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix);
+
+/*
+ * Descriptor buffer is 128 byte long and contains:
+ *
+ *       Name             Offset  Length  Value
+ * Vendor Signature          0       4    "DELL"
+ * Object Signature          4       4    " WMI"
+ * WMI Interface Version     8       4    <version>
+ * WMI buffer length        12       4    <length>
+ * WMI hotfix number        16       4    <hotfix>
+ */
+static int dell_wmi_descriptor_probe(struct wmi_device *wdev)
+{
+       union acpi_object *obj = NULL;
+       struct descriptor_priv *priv;
+       u32 *buffer;
+       int ret;
+
+       obj = wmidev_block_query(wdev, 0);
+       if (!obj) {
+               dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n");
+               ret = -EIO;
+               goto out;
+       }
+
+       if (obj->type != ACPI_TYPE_BUFFER) {
+               dev_err(&wdev->dev, "Dell descriptor has wrong type\n");
+               ret = -EINVAL;
+               descriptor_valid = ret;
+               goto out;
+       }
+
+       /* Although it's not technically a failure, this would lead to
+        * unexpected behavior
+        */
+       if (obj->buffer.length != 128) {
+               dev_err(&wdev->dev,
+                       "Dell descriptor buffer has unexpected length (%d)\n",
+                       obj->buffer.length);
+               ret = -EINVAL;
+               descriptor_valid = ret;
+               goto out;
+       }
+
+       buffer = (u32 *)obj->buffer.pointer;
+
+       if (strncmp(obj->string.pointer, "DELL WMI", 8) != 0) {
+               dev_err(&wdev->dev, "Dell descriptor buffer has invalid signature (%8ph)\n",
+                       buffer);
+               ret = -EINVAL;
+               descriptor_valid = ret;
+               goto out;
+       }
+       descriptor_valid = 0;
+
+       if (buffer[2] != 0 && buffer[2] != 1)
+               dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%lu)\n",
+                       (unsigned long) buffer[2]);
+
+       priv = devm_kzalloc(&wdev->dev, sizeof(struct descriptor_priv),
+                           GFP_KERNEL);
+
+       if (!priv) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       priv->interface_version = buffer[2];
+       priv->size = buffer[3];
+       priv->hotfix = buffer[4];
+       ret = 0;
+       dev_set_drvdata(&wdev->dev, priv);
+       mutex_lock(&list_mutex);
+       list_add_tail(&priv->list, &wmi_list);
+       mutex_unlock(&list_mutex);
+
+       dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n",
+               (unsigned long) priv->interface_version,
+               (unsigned long) priv->size,
+               (unsigned long) priv->hotfix);
+
+out:
+       kfree(obj);
+       return ret;
+}
+
+static int dell_wmi_descriptor_remove(struct wmi_device *wdev)
+{
+       struct descriptor_priv *priv = dev_get_drvdata(&wdev->dev);
+
+       mutex_lock(&list_mutex);
+       list_del(&priv->list);
+       mutex_unlock(&list_mutex);
+       return 0;
+}
+
+static const struct wmi_device_id dell_wmi_descriptor_id_table[] = {
+       { .guid_string = DELL_WMI_DESCRIPTOR_GUID },
+       { },
+};
+
+static struct wmi_driver dell_wmi_descriptor_driver = {
+       .driver = {
+               .name = "dell-wmi-descriptor",
+       },
+       .probe = dell_wmi_descriptor_probe,
+       .remove = dell_wmi_descriptor_remove,
+       .id_table = dell_wmi_descriptor_id_table,
+};
+
+module_wmi_driver(dell_wmi_descriptor_driver);
+
+MODULE_ALIAS("wmi:" DELL_WMI_DESCRIPTOR_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell WMI descriptor driver");
+MODULE_LICENSE("GPL");
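For reference, the 128-byte descriptor layout documented in the comment above can also be decoded outside the kernel. A hedged, self-contained sketch follows; how the caller obtains the raw buffer is left open.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct dell_wmi_desc {
    uint32_t interface_version;
    uint32_t size;
    uint32_t hotfix;
};

/* Returns 0 on success, -1 if the buffer does not look like a Dell WMI
 * descriptor; mirrors the checks in dell_wmi_descriptor_probe() above. */
static int parse_dell_wmi_descriptor(const uint8_t *buf, size_t len,
                                     struct dell_wmi_desc *out)
{
    uint32_t words[5];

    if (len != 128)
        return -1;                      /* unexpected length */
    if (memcmp(buf, "DELL WMI", 8) != 0)
        return -1;                      /* bad vendor/object signature */

    memcpy(words, buf, sizeof(words));  /* avoid unaligned 32-bit loads */
    out->interface_version = words[2];  /* offset 8 */
    out->size = words[3];               /* offset 12 */
    out->hotfix = words[4];             /* offset 16 */
    return 0;
}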
diff --git a/drivers/platform/x86/dell-wmi-descriptor.h b/drivers/platform/x86/dell-wmi-descriptor.h
new file mode 100644 (file)
index 0000000..a6123a4
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *  Dell WMI descriptor driver
+ *
+ *  Copyright (c) 2017 Dell Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#ifndef _DELL_WMI_DESCRIPTOR_H_
+#define _DELL_WMI_DESCRIPTOR_H_
+
+#include <linux/wmi.h>
+
+/* possible return values:
+ *  -ENODEV: Descriptor GUID missing from WMI bus
+ *  -EPROBE_DEFER: probing for dell-wmi-descriptor not yet run
+ *  0: valid descriptor, successfully probed
+ *  < 0: invalid descriptor, don't probe dependent devices
+ */
+int dell_wmi_get_descriptor_valid(void);
+
+bool dell_wmi_get_interface_version(u32 *version);
+bool dell_wmi_get_size(u32 *size);
+bool dell_wmi_get_hotfix(u32 *hotfix);
+
+#endif
index 28d9f8696081bcb063a09e9b1c481ede0259e049..39d2f451848332d8346201b9cc1cd08bb5e10427 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/wmi.h>
 #include <acpi/video.h>
 #include "dell-smbios.h"
+#include "dell-wmi-descriptor.h"
 
 MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
 MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
@@ -46,12 +47,10 @@ MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
 MODULE_LICENSE("GPL");
 
 #define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492"
-#define DELL_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
 
 static bool wmi_requires_smbios_request;
 
 MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
-MODULE_ALIAS("wmi:"DELL_DESCRIPTOR_GUID);
 
 struct dell_wmi_priv {
        struct input_dev *input_dev;
@@ -618,78 +617,6 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev)
        input_unregister_device(priv->input_dev);
 }
 
-/*
- * Descriptor buffer is 128 byte long and contains:
- *
- *       Name             Offset  Length  Value
- * Vendor Signature          0       4    "DELL"
- * Object Signature          4       4    " WMI"
- * WMI Interface Version     8       4    <version>
- * WMI buffer length        12       4    4096
- */
-static int dell_wmi_check_descriptor_buffer(struct wmi_device *wdev)
-{
-       struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
-       union acpi_object *obj = NULL;
-       struct wmi_device *desc_dev;
-       u32 *buffer;
-       int ret;
-
-       desc_dev = wmidev_get_other_guid(wdev, DELL_DESCRIPTOR_GUID);
-       if (!desc_dev) {
-               dev_err(&wdev->dev, "Dell WMI descriptor does not exist\n");
-               return -ENODEV;
-       }
-
-       obj = wmidev_block_query(desc_dev, 0);
-       if (!obj) {
-               dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n");
-               ret = -EIO;
-               goto out;
-       }
-
-       if (obj->type != ACPI_TYPE_BUFFER) {
-               dev_err(&wdev->dev, "Dell descriptor has wrong type\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (obj->buffer.length != 128) {
-               dev_err(&wdev->dev,
-                       "Dell descriptor buffer has invalid length (%d)\n",
-                       obj->buffer.length);
-               if (obj->buffer.length < 16) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-       }
-
-       buffer = (u32 *)obj->buffer.pointer;
-
-       if (buffer[0] != 0x4C4C4544 && buffer[1] != 0x494D5720)
-               dev_warn(&wdev->dev, "Dell descriptor buffer has invalid signature (%*ph)\n",
-                       8, buffer);
-
-       if (buffer[2] != 0 && buffer[2] != 1)
-               dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%d)\n",
-                       buffer[2]);
-
-       if (buffer[3] != 4096)
-               dev_warn(&wdev->dev, "Dell descriptor buffer has invalid buffer length (%d)\n",
-                       buffer[3]);
-
-       priv->interface_version = buffer[2];
-       ret = 0;
-
-       dev_info(&wdev->dev, "Detected Dell WMI interface version %u\n",
-               priv->interface_version);
-
-out:
-       kfree(obj);
-       put_device(&desc_dev->dev);
-       return ret;
-}
-
 /*
  * According to Dell SMBIOS documentation:
  *
@@ -711,13 +638,16 @@ static int dell_wmi_events_set_enabled(bool enable)
        struct calling_interface_buffer *buffer;
        int ret;
 
-       buffer = dell_smbios_get_buffer();
+       buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+       buffer->cmd_class = CLASS_INFO;
+       buffer->cmd_select = SELECT_APP_REGISTRATION;
        buffer->input[0] = 0x10000;
        buffer->input[1] = 0x51534554;
        buffer->input[3] = enable;
-       dell_smbios_send_request(17, 3);
-       ret = buffer->output[0];
-       dell_smbios_release_buffer();
+       ret = dell_smbios_call(buffer);
+       if (ret == 0)
+               ret = buffer->output[0];
+       kfree(buffer);
 
        return dell_smbios_error(ret);
 }
@@ -725,7 +655,11 @@ static int dell_wmi_events_set_enabled(bool enable)
 static int dell_wmi_probe(struct wmi_device *wdev)
 {
        struct dell_wmi_priv *priv;
-       int err;
+       int ret;
+
+       ret = dell_wmi_get_descriptor_valid();
+       if (ret)
+               return ret;
 
        priv = devm_kzalloc(
                &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL);
@@ -733,9 +667,8 @@ static int dell_wmi_probe(struct wmi_device *wdev)
                return -ENOMEM;
        dev_set_drvdata(&wdev->dev, priv);
 
-       err = dell_wmi_check_descriptor_buffer(wdev);
-       if (err)
-               return err;
+       if (!dell_wmi_get_interface_version(&priv->interface_version))
+               return -EPROBE_DEFER;
 
        return dell_wmi_input_setup(wdev);
 }
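One nit in the dell_wmi_events_set_enabled() hunk above: the kzalloc() result is used without a NULL check. A hedged variant with that check added (assuming -ENOMEM is the appropriate return) would be:

static int dell_wmi_events_set_enabled(bool enable)
{
        struct calling_interface_buffer *buffer;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        /* class/select and magic values carried over unchanged from the hunk above */
        buffer->cmd_class = CLASS_INFO;
        buffer->cmd_select = SELECT_APP_REGISTRATION;
        buffer->input[0] = 0x10000;
        buffer->input[1] = 0x51534554;
        buffer->input[3] = enable;
        ret = dell_smbios_call(buffer);
        if (ret == 0)
                ret = buffer->output[0];
        kfree(buffer);

        return dell_smbios_error(ret);
}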
index 56a8195096a229c6975d3f78746ecbc4c6169660..2cfbd3fa5136002362bdc558c14d00e40ed56e22 100644 (file)
@@ -691,6 +691,7 @@ static enum led_brightness eco_led_get(struct led_classdev *cdev)
 
 static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
 {
+       struct fujitsu_laptop *priv = acpi_driver_data(device);
        struct led_classdev *led;
        int result;
 
@@ -724,12 +725,15 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
        }
 
        /*
-        * BTNI bit 24 seems to indicate the presence of a radio toggle
-        * button in place of a slide switch, and all such machines appear
-        * to also have an RF LED.  Therefore use bit 24 as an indicator
-        * that an RF LED is present.
+        * Some Fujitsu laptops have a radio toggle button in place of a slide
+        * switch and all such machines appear to also have an RF LED.  Based on
+        * comparing DSDT tables of four Fujitsu Lifebook models (E744, E751,
+        * S7110, S8420; the first one has a radio toggle button, the other
+        * three have slide switches), bit 17 of flags_supported (the value
+        * returned by method S000 of ACPI device FUJ02E3) seems to indicate
+        * whether given model has a radio toggle button.
         */
-       if (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+       if (priv->flags_supported & BIT(17)) {
                led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
                if (!led)
                        return -ENOMEM;
index b4ed3dc983d5229c7e1726da58789a937a297d39..b4224389febebe4688ea2195d8a3a786ba3c2081 100644 (file)
@@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask)
        if (state < 0)
                return state;
 
-       return state & 0x1;
+       return !!(state & mask);
 }
 
 static int __init hp_wmi_bios_2008_later(void)
index 493d8910a74e2eca10812c3202a74cdb21ed37cd..7b12abe86b94f3f71d1fc9cb85d5828554ee8cfb 100644 (file)
@@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
        AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
        AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
+       AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
index fe98d4ac0df37040c3da8ad470bf7a33c5477fbe..53ab4e0f896255614b2462d5323a03e53959a73e 100644 (file)
@@ -1166,6 +1166,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
                },
        },
+       {
+               .ident = "Lenovo YOGA 920-13IKB",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"),
+               },
+       },
        {}
 };
 
index e34fd70b67afe4d2573f8c4c39dcc4e1290ef97e..f470279c4c100424815c45dacc1304193677ae8a 100644 (file)
@@ -226,6 +226,24 @@ wakeup:
                return;
        }
 
+       /*
+        * Needed for suspend to work on some platforms that don't expose
+        * the 5-button array, but still send notifies with power button
+        * event code to this device object on power button actions.
+        *
+        * Report the power button press; catch and ignore the button release.
+        */
+       if (!priv->array) {
+               if (event == 0xce) {
+                       input_report_key(priv->input_dev, KEY_POWER, 1);
+                       input_sync(priv->input_dev);
+                       return;
+               }
+
+               if (event == 0xcf)
+                       return;
+       }
+
        /* 0xC0 is for HID events, other values are for 5 button array */
        if (event != 0xc0) {
                if (!priv->array ||
diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c
new file mode 100644 (file)
index 0000000..c2257bd
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * WMI Thunderbolt driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#define INTEL_WMI_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341"
+
+static ssize_t force_power_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct acpi_buffer input;
+       acpi_status status;
+       u8 mode;
+
+       input.length = sizeof(u8);
+       input.pointer = &mode;
+       mode = hex_to_bin(buf[0]);
+       if (mode == 0 || mode == 1) {
+               status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1,
+                                            &input, NULL);
+               if (ACPI_FAILURE(status))
+                       return -ENODEV;
+       } else {
+               return -EINVAL;
+       }
+       return count;
+}
+
+static DEVICE_ATTR_WO(force_power);
+
+static struct attribute *tbt_attrs[] = {
+       &dev_attr_force_power.attr,
+       NULL
+};
+
+static const struct attribute_group tbt_attribute_group = {
+       .attrs = tbt_attrs,
+};
+
+static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev)
+{
+       int ret;
+
+       ret = sysfs_create_group(&wdev->dev.kobj, &tbt_attribute_group);
+       kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
+       return ret;
+}
+
+static int intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
+{
+       sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group);
+       kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
+       return 0;
+}
+
+static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
+       { .guid_string = INTEL_WMI_THUNDERBOLT_GUID },
+       { },
+};
+
+static struct wmi_driver intel_wmi_thunderbolt_driver = {
+       .driver = {
+               .name = "intel-wmi-thunderbolt",
+       },
+       .probe = intel_wmi_thunderbolt_probe,
+       .remove = intel_wmi_thunderbolt_remove,
+       .id_table = intel_wmi_thunderbolt_id_table,
+};
+
+module_wmi_driver(intel_wmi_thunderbolt_driver);
+
+MODULE_ALIAS("wmi:" INTEL_WMI_THUNDERBOLT_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver");
+MODULE_LICENSE("GPL");
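The force_power attribute created above accepts a single hex digit ('0' or '1'). A user-space sketch of switching it on; the sysfs path is an assumption (WMI devices are normally named after their GUID on the wmi bus):

#include <stdio.h>

int main(void)
{
    /* assumed location of the attribute created by intel_wmi_thunderbolt_probe() */
    const char *attr =
        "/sys/bus/wmi/devices/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power";
    FILE *f = fopen(attr, "w");

    if (!f) {
        perror(attr);
        return 1;
    }
    /* the store handler takes one hex digit: 0 = off, 1 = force power on */
    fputs("1", f);
    fclose(f);
    return 0;
}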
index da706e2c4232cf25d40aefd52f54a910ab437e6f..380ef7ec094f309d42e30a08c97924b22c8496ce 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 
 #define EXPECTED_PTYPE         4
@@ -34,6 +35,42 @@ struct cht_int33fe_data {
        struct i2c_client *pi3usb30532;
 };
 
+/*
+ * Grrr, I severely dislike buggy BIOSes. At least one BIOS enumerates
+ * the max17047 both through the INT33FE ACPI device (it is right there
+ * in the resources table) as well as through a separate MAX17047 device.
+ *
+ * These helpers are used to work around this by checking if an i2c-client
+ * for the max17047 has already been registered.
+ */
+static int cht_int33fe_check_for_max17047(struct device *dev, void *data)
+{
+       struct i2c_client **max17047 = data;
+       struct acpi_device *adev;
+       const char *hid;
+
+       adev = ACPI_COMPANION(dev);
+       if (!adev)
+               return 0;
+
+       hid = acpi_device_hid(adev);
+
+       /* The MAX17047 ACPI node doesn't have a UID, so we don't check that */
+       if (strcmp(hid, "MAX17047"))
+               return 0;
+
+       *max17047 = to_i2c_client(dev);
+       return 1;
+}
+
+static struct i2c_client *cht_int33fe_find_max17047(void)
+{
+       struct i2c_client *max17047 = NULL;
+
+       i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047);
+       return max17047;
+}
+
 static const char * const max17047_suppliers[] = { "bq24190-charger" };
 
 static const struct property_entry max17047_props[] = {
@@ -41,14 +78,25 @@ static const struct property_entry max17047_props[] = {
        { }
 };
 
+static const struct property_entry fusb302_props[] = {
+       PROPERTY_ENTRY_STRING("fcs,extcon-name", "cht_wcove_pwrsrc"),
+       PROPERTY_ENTRY_U32("fcs,max-sink-microvolt", 12000000),
+       PROPERTY_ENTRY_U32("fcs,max-sink-microamp",   3000000),
+       PROPERTY_ENTRY_U32("fcs,max-sink-microwatt", 36000000),
+       { }
+};
+
 static int cht_int33fe_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
        struct i2c_board_info board_info;
        struct cht_int33fe_data *data;
+       struct i2c_client *max17047;
+       struct regulator *regulator;
        unsigned long long ptyp;
        acpi_status status;
        int fusb302_irq;
+       int ret;
 
        status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp);
        if (ACPI_FAILURE(status)) {
@@ -63,6 +111,34 @@ static int cht_int33fe_probe(struct i2c_client *client)
        if (ptyp != EXPECTED_PTYPE)
                return -ENODEV;
 
+       /* Check presence of INT34D3 (hardware-rev 3) expected for ptype == 4 */
+       if (!acpi_dev_present("INT34D3", "1", 3)) {
+               dev_err(dev, "Error PTYPE == %d, but no INT34D3 device\n",
+                       EXPECTED_PTYPE);
+               return -ENODEV;
+       }
+
+       /*
+        * We expect the WC PMIC to be paired with a TI bq24292i charger-IC.
+        * We check for the bq24292i vbus regulator here, this has 2 purposes:
+        * 1) The bq24292i allows charging with up to 12V, setting the fusb302's
+        *    max-snk voltage to 12V with another charger-IC is not good.
+        * 2) For the fusb302 driver to get the bq24292i vbus regulator, the
+        *    regulator-map, which is part of the bq24292i regulator_init_data,
+        *    must be registered before the fusb302 is instantiated, otherwise
+        *    it will end up with a dummy-regulator.
+        * Note "cht_wc_usb_typec_vbus" comes from the regulator_init_data
+        * which is defined in i2c-cht-wc.c from where the bq24292i i2c-client
+        * gets instantiated. We use regulator_get_optional here so that we
+        * don't end up getting a dummy-regulator ourselves.
+        */
+       regulator = regulator_get_optional(dev, "cht_wc_usb_typec_vbus");
+       if (IS_ERR(regulator)) {
+               ret = PTR_ERR(regulator);
+               return (ret == -ENODEV) ? -EPROBE_DEFER : ret;
+       }
+       regulator_put(regulator);
+
        /* The FUSB302 uses the irq at index 1 and is the only irq user */
        fusb302_irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 1);
        if (fusb302_irq < 0) {
@@ -75,16 +151,31 @@ static int cht_int33fe_probe(struct i2c_client *client)
        if (!data)
                return -ENOMEM;
 
-       memset(&board_info, 0, sizeof(board_info));
-       strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
-       board_info.properties = max17047_props;
-
-       data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
-       if (!data->max17047)
-               return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
+       /* Work around BIOS bug, see comment on cht_int33fe_find_max17047 */
+       max17047 = cht_int33fe_find_max17047();
+       if (max17047) {
+               /* Pre-existing i2c-client for the max17047, add device-props */
+               ret = device_add_properties(&max17047->dev, max17047_props);
+               if (ret)
+                       return ret;
+               /* And re-probe to get the new device-props applied. */
+               ret = device_reprobe(&max17047->dev);
+               if (ret)
+                       dev_warn(dev, "Reprobing max17047 error: %d\n", ret);
+       } else {
+               memset(&board_info, 0, sizeof(board_info));
+               strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
+               board_info.dev_name = "max17047";
+               board_info.properties = max17047_props;
+               data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
+               if (!data->max17047)
+                       return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
+       }
 
        memset(&board_info, 0, sizeof(board_info));
-       strlcpy(board_info.type, "fusb302", I2C_NAME_SIZE);
+       strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+       board_info.dev_name = "fusb302";
+       board_info.properties = fusb302_props;
        board_info.irq = fusb302_irq;
 
        data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info);
@@ -92,6 +183,7 @@ static int cht_int33fe_probe(struct i2c_client *client)
                goto out_unregister_max17047;
 
        memset(&board_info, 0, sizeof(board_info));
+       board_info.dev_name = "pi3usb30532";
        strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
 
        data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
@@ -106,7 +198,8 @@ out_unregister_fusb302:
        i2c_unregister_device(data->fusb302);
 
 out_unregister_max17047:
-       i2c_unregister_device(data->max17047);
+       if (data->max17047)
+               i2c_unregister_device(data->max17047);
 
        return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
 }
@@ -117,7 +210,8 @@ static int cht_int33fe_remove(struct i2c_client *i2c)
 
        i2c_unregister_device(data->pi3usb30532);
        i2c_unregister_device(data->fusb302);
-       i2c_unregister_device(data->max17047);
+       if (data->max17047)
+               i2c_unregister_device(data->max17047);
 
        return 0;
 }
index 58dcee562d6417be3e1d7ee52fc91c384469cffa..a0c95853fd3f98abbe1979ad2478d6b781c7b046 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -259,8 +255,6 @@ static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */
 
 /* Per-SKU limits */
 struct ips_mcp_limits {
-       int cpu_family;
-       int cpu_model; /* includes extended model... */
        int mcp_power_limit; /* mW units */
        int core_power_limit;
        int mch_power_limit;
@@ -295,11 +289,14 @@ static struct ips_mcp_limits ips_ulv_limits = {
 };
 
 struct ips_driver {
-       struct pci_dev *dev;
-       void *regmap;
+       struct device *dev;
+       void __iomem *regmap;
+       int irq;
+
        struct task_struct *monitor;
        struct task_struct *adjust;
        struct dentry *debug_root;
+       struct timer_list timer;
 
        /* Average CPU core temps (all averages in .01 degrees C for precision) */
        u16 ctv1_avg_temp;
@@ -594,7 +591,7 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips)
                return;
 
        if (!ips->gpu_turbo_disable())
-               dev_err(&ips->dev->dev, "failed to disable graphics turbo\n");
+               dev_err(ips->dev, "failed to disable graphics turbo\n");
        else
                ips->__gpu_turbo_on = false;
 }
@@ -649,8 +646,7 @@ static bool cpu_exceeded(struct ips_driver *ips, int cpu)
        spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
 
        if (ret)
-               dev_info(&ips->dev->dev,
-                        "CPU power or thermal limit exceeded\n");
+               dev_info(ips->dev, "CPU power or thermal limit exceeded\n");
 
        return ret;
 }
@@ -769,7 +765,7 @@ static int ips_adjust(void *data)
        struct ips_driver *ips = data;
        unsigned long flags;
 
-       dev_dbg(&ips->dev->dev, "starting ips-adjust thread\n");
+       dev_dbg(ips->dev, "starting ips-adjust thread\n");
 
        /*
         * Adjust CPU and GPU clamps every 5s if needed.  Doing it more
@@ -816,7 +812,7 @@ sleep:
                schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
        } while (!kthread_should_stop());
 
-       dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
+       dev_dbg(ips->dev, "ips-adjust thread stopped\n");
 
        return 0;
 }
@@ -942,9 +938,10 @@ static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
        return avg;
 }
 
-static void monitor_timeout(unsigned long arg)
+static void monitor_timeout(struct timer_list *t)
 {
-       wake_up_process((struct task_struct *)arg);
+       struct ips_driver *ips = from_timer(ips, t, timer);
+       wake_up_process(ips->monitor);
 }
 
 /**
@@ -961,7 +958,6 @@ static void monitor_timeout(unsigned long arg)
 static int ips_monitor(void *data)
 {
        struct ips_driver *ips = data;
-       struct timer_list timer;
        unsigned long seqno_timestamp, expire, last_msecs, last_sample_period;
        int i;
        u32 *cpu_samples, *mchp_samples, old_cpu_power;
@@ -976,7 +972,7 @@ static int ips_monitor(void *data)
        mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
        if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
                        !cpu_samples || !mchp_samples) {
-               dev_err(&ips->dev->dev,
+               dev_err(ips->dev,
                        "failed to allocate sample array, ips disabled\n");
                kfree(mcp_samples);
                kfree(ctv1_samples);
@@ -1049,8 +1045,7 @@ static int ips_monitor(void *data)
        schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
        last_sample_period = IPS_SAMPLE_PERIOD;
 
-       setup_deferrable_timer_on_stack(&timer, monitor_timeout,
-                                       (unsigned long)current);
+       timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
        do {
                u32 cpu_val, mch_val;
                u16 val;
@@ -1097,7 +1092,8 @@ static int ips_monitor(void *data)
                        ITV_ME_SEQNO_SHIFT;
                if (cur_seqno == last_seqno &&
                    time_after(jiffies, seqno_timestamp + HZ)) {
-                       dev_warn(&ips->dev->dev, "ME failed to update for more than 1s, likely hung\n");
+                       dev_warn(ips->dev,
+                                "ME failed to update for more than 1s, likely hung\n");
                } else {
                        seqno_timestamp = get_jiffies_64();
                        last_seqno = cur_seqno;
@@ -1107,7 +1103,7 @@ static int ips_monitor(void *data)
                expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);
 
                __set_current_state(TASK_INTERRUPTIBLE);
-               mod_timer(&timer, expire);
+               mod_timer(&ips->timer, expire);
                schedule();
 
                /* Calculate actual sample period for power averaging */
@@ -1116,10 +1112,9 @@ static int ips_monitor(void *data)
                        last_sample_period = 1;
        } while (!kthread_should_stop());
 
-       del_timer_sync(&timer);
-       destroy_timer_on_stack(&timer);
+       del_timer_sync(&ips->timer);
 
-       dev_dbg(&ips->dev->dev, "ips-monitor thread stopped\n");
+       dev_dbg(ips->dev, "ips-monitor thread stopped\n");
 
        return 0;
 }
@@ -1128,17 +1123,17 @@ static int ips_monitor(void *data)
 #define THM_DUMPW(reg) \
        { \
        u16 val = thm_readw(reg); \
-       dev_dbg(&ips->dev->dev, #reg ": 0x%04x\n", val); \
+       dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \
        }
 #define THM_DUMPL(reg) \
        { \
        u32 val = thm_readl(reg); \
-       dev_dbg(&ips->dev->dev, #reg ": 0x%08x\n", val); \
+       dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \
        }
 #define THM_DUMPQ(reg) \
        { \
        u64 val = thm_readq(reg); \
-       dev_dbg(&ips->dev->dev, #reg ": 0x%016x\n", val); \
+       dev_dbg(ips->dev, #reg ": 0x%016x\n", val); \
        }
 
 static void dump_thermal_info(struct ips_driver *ips)
@@ -1146,7 +1141,7 @@ static void dump_thermal_info(struct ips_driver *ips)
        u16 ptl;
 
        ptl = thm_readw(THM_PTL);
-       dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl);
+       dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl);
 
        THM_DUMPW(THM_CTA);
        THM_DUMPW(THM_TRC);
@@ -1175,8 +1170,8 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
        if (!tses && !tes)
                return IRQ_NONE;
 
-       dev_info(&ips->dev->dev, "TSES: 0x%02x\n", tses);
-       dev_info(&ips->dev->dev, "TES: 0x%02x\n", tes);
+       dev_info(ips->dev, "TSES: 0x%02x\n", tses);
+       dev_info(ips->dev, "TES: 0x%02x\n", tes);
 
        /* STS update from EC? */
        if (tes & 1) {
@@ -1214,8 +1209,8 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
 
        /* Thermal trip */
        if (tses) {
-               dev_warn(&ips->dev->dev,
-                        "thermal trip occurred, tses: 0x%04x\n", tses);
+               dev_warn(ips->dev, "thermal trip occurred, tses: 0x%04x\n",
+                        tses);
                thm_writeb(THM_TSES, tses);
        }
 
@@ -1330,8 +1325,7 @@ static void ips_debugfs_init(struct ips_driver *ips)
 
        ips->debug_root = debugfs_create_dir("ips", NULL);
        if (!ips->debug_root) {
-               dev_err(&ips->dev->dev,
-                       "failed to create debugfs entries: %ld\n",
+               dev_err(ips->dev, "failed to create debugfs entries: %ld\n",
                        PTR_ERR(ips->debug_root));
                return;
        }
@@ -1345,8 +1339,7 @@ static void ips_debugfs_init(struct ips_driver *ips)
                                          ips->debug_root, node,
                                          &ips_debugfs_ops);
                if (!ent) {
-                       dev_err(&ips->dev->dev,
-                               "failed to create debug file: %ld\n",
+                       dev_err(ips->dev, "failed to create debug file: %ld\n",
                                PTR_ERR(ent));
                        goto err_cleanup;
                }
@@ -1373,8 +1366,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
        u16 tdp;
 
        if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
-               dev_info(&ips->dev->dev, "Non-IPS CPU detected.\n");
-               goto out;
+               dev_info(ips->dev, "Non-IPS CPU detected.\n");
+               return NULL;
        }
 
        rdmsrl(IA32_MISC_ENABLE, misc_en);
@@ -1395,8 +1388,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
        else if (strstr(boot_cpu_data.x86_model_id, "CPU       U"))
                limits = &ips_ulv_limits;
        else {
-               dev_info(&ips->dev->dev, "No CPUID match found.\n");
-               goto out;
+               dev_info(ips->dev, "No CPUID match found.\n");
+               return NULL;
        }
 
        rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
@@ -1404,12 +1397,12 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
 
        /* Sanity check TDP against CPU */
        if (limits->core_power_limit != (tdp / 8) * 1000) {
-               dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
+               dev_info(ips->dev,
+                        "CPU TDP doesn't match expected value (found %d, expected %d)\n",
                         tdp / 8, limits->core_power_limit / 1000);
                limits->core_power_limit = (tdp / 8) * 1000;
        }
 
-out:
        return limits;
 }
 
@@ -1459,7 +1452,7 @@ ips_gpu_turbo_enabled(struct ips_driver *ips)
 {
        if (!ips->gpu_busy && late_i915_load) {
                if (ips_get_i915_syms(ips)) {
-                       dev_info(&ips->dev->dev,
+                       dev_info(ips->dev,
                                 "i915 driver attached, reenabling gpu turbo\n");
                        ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
                }
@@ -1480,8 +1473,7 @@ ips_link_to_i915_driver(void)
 EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
 
 static const struct pci_device_id ips_id_table[] = {
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
-                    PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
        { 0, }
 };
 
@@ -1517,62 +1509,45 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (dmi_check_system(ips_blacklist))
                return -ENODEV;
 
-       ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
+       ips = devm_kzalloc(&dev->dev, sizeof(*ips), GFP_KERNEL);
        if (!ips)
                return -ENOMEM;
 
-       pci_set_drvdata(dev, ips);
-       ips->dev = dev;
+       spin_lock_init(&ips->turbo_status_lock);
+       ips->dev = &dev->dev;
 
        ips->limits = ips_detect_cpu(ips);
        if (!ips->limits) {
                dev_info(&dev->dev, "IPS not supported on this CPU\n");
-               ret = -ENXIO;
-               goto error_free;
+               return -ENXIO;
        }
 
-       spin_lock_init(&ips->turbo_status_lock);
-
-       ret = pci_enable_device(dev);
+       ret = pcim_enable_device(dev);
        if (ret) {
                dev_err(&dev->dev, "can't enable PCI device, aborting\n");
-               goto error_free;
+               return ret;
        }
 
-       if (!pci_resource_start(dev, 0)) {
-               dev_err(&dev->dev, "TBAR not assigned, aborting\n");
-               ret = -ENXIO;
-               goto error_free;
-       }
-
-       ret = pci_request_regions(dev, "ips thermal sensor");
+       ret = pcim_iomap_regions(dev, 1 << 0, pci_name(dev));
        if (ret) {
-               dev_err(&dev->dev, "thermal resource busy, aborting\n");
-               goto error_free;
-       }
-
-
-       ips->regmap = ioremap(pci_resource_start(dev, 0),
-                             pci_resource_len(dev, 0));
-       if (!ips->regmap) {
                dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
-               ret = -EBUSY;
-               goto error_release;
+               return ret;
        }
+       ips->regmap = pcim_iomap_table(dev)[0];
+
+       pci_set_drvdata(dev, ips);
 
        tse = thm_readb(THM_TSE);
        if (tse != TSE_EN) {
                dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
-               ret = -ENXIO;
-               goto error_unmap;
+               return -ENXIO;
        }
 
        trc = thm_readw(THM_TRC);
        trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
        if ((trc & trc_required_mask) != trc_required_mask) {
                dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
-               ret = -ENXIO;
-               goto error_unmap;
+               return -ENXIO;
        }
 
        if (trc & TRC_CORE2_EN)
@@ -1602,20 +1577,23 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        rdmsrl(PLATFORM_INFO, platform_info);
        if (!(platform_info & PLATFORM_TDP)) {
                dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
-               ret = -ENODEV;
-               goto error_unmap;
+               return -ENODEV;
        }
 
        /*
         * IRQ handler for ME interaction
         * Note: don't use MSI here as the PCH has bugs.
         */
-       pci_disable_msi(dev);
-       ret = request_irq(dev->irq, ips_irq_handler, IRQF_SHARED, "ips",
-                         ips);
+       ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+       if (ret < 0)
+               return ret;
+
+       ips->irq = pci_irq_vector(dev, 0);
+
+       ret = request_irq(ips->irq, ips_irq_handler, IRQF_SHARED, "ips", ips);
        if (ret) {
                dev_err(&dev->dev, "request irq failed, aborting\n");
-               goto error_unmap;
+               return ret;
        }
 
        /* Enable aux, hot & critical interrupts */
@@ -1672,13 +1650,8 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
 error_thread_cleanup:
        kthread_stop(ips->adjust);
 error_free_irq:
-       free_irq(ips->dev->irq, ips);
-error_unmap:
-       iounmap(ips->regmap);
-error_release:
-       pci_release_regions(dev);
-error_free:
-       kfree(ips);
+       free_irq(ips->irq, ips);
+       pci_free_irq_vectors(dev);
        return ret;
 }
 
@@ -1709,27 +1682,20 @@ static void ips_remove(struct pci_dev *dev)
        wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
        wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
 
-       free_irq(ips->dev->irq, ips);
+       free_irq(ips->irq, ips);
+       pci_free_irq_vectors(dev);
        if (ips->adjust)
                kthread_stop(ips->adjust);
        if (ips->monitor)
                kthread_stop(ips->monitor);
-       iounmap(ips->regmap);
-       pci_release_regions(dev);
-       kfree(ips);
        dev_dbg(&dev->dev, "IPS driver removed\n");
 }
 
-static void ips_shutdown(struct pci_dev *dev)
-{
-}
-
 static struct pci_driver ips_pci_driver = {
        .name = "intel ips",
        .id_table = ips_id_table,
        .probe = ips_probe,
        .remove = ips_remove,
-       .shutdown = ips_shutdown,
 };
 
 module_pci_driver(ips_pci_driver);
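The intel_ips changes above also convert the monitor timer from an on-stack setup_deferrable_timer_on_stack() user to a timer_list embedded in struct ips_driver, driven through the timer_setup()/from_timer() API. A generic sketch of that pattern (illustrative, not tied to this driver):

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>

struct my_driver {
        struct task_struct *monitor;
        struct timer_list timer;        /* embedded, so nothing lives on a stack */
};

static void my_timeout(struct timer_list *t)
{
        /* recover the containing structure from the timer pointer */
        struct my_driver *drv = from_timer(drv, t, timer);

        wake_up_process(drv->monitor);
}

static void my_start(struct my_driver *drv)
{
        timer_setup(&drv->timer, my_timeout, TIMER_DEFERRABLE);
        mod_timer(&drv->timer, jiffies + msecs_to_jiffies(5000));
}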
index 73299beff5b358b8ba5825d5d83d88d1182e333b..60f4e3ddbe9f5b3f84e02030a57b98793141e615 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  */
index a47a41fc10ad77c427158811857da4bc21cd747f..b5b890127479f8f586880762828bbd447a383ddf 100644 (file)
@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
         * - GTDRIVER_IPC BASE_IFACE
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-       if (res) {
+       if (res && resource_size(res) > 1) {
                addr = devm_ioremap_resource(&pdev->dev, res);
                if (!IS_ERR(addr))
                        punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-       if (res) {
+       if (res && resource_size(res) > 1) {
                addr = devm_ioremap_resource(&pdev->dev, res);
                if (!IS_ERR(addr))
                        punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
-       if (res) {
+       if (res && resource_size(res) > 1) {
                addr = devm_ioremap_resource(&pdev->dev, res);
                if (!IS_ERR(addr))
                        punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
-       if (res) {
+       if (res && resource_size(res) > 1) {
                addr = devm_ioremap_resource(&pdev->dev, res);
                if (!IS_ERR(addr))
                        punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
index 0d4c3808a6d892f38b0bf6f3ae87dc9f8ae8dba0..f378621b5fe9d86632a853a33599b1efff88b05a 100644 (file)
@@ -15,9 +15,8 @@
  * Telemetry Framework provides platform related PM and performance statistics.
  * This file provides the core telemetry API implementation.
  */
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/device.h>
+#include <linux/module.h>
 
 #include <asm/intel_telemetry.h>
 
index d4fc42b4cbebfedeeaf39e8c65e28b2d0a8b6c0a..4249e8267bbcfc321bd83ac678c20de8f967b584 100644 (file)
  * /sys/kernel/debug/telemetry/ioss_race_verbosity: Write and Change Tracing
  *                             Verbosity via firmware
  */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
 #include <linux/debugfs.h>
-#include <linux/seq_file.h>
+#include <linux/device.h>
 #include <linux/io.h>
-#include <linux/uaccess.h>
+#include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/seq_file.h>
 #include <linux/suspend.h>
 
 #include <asm/cpu_device_id.h>
@@ -76,8 +74,6 @@
 #define TELEM_IOSS_DX_D0IX_EVTS                25
 #define TELEM_IOSS_PG_EVTS             30
 
-#define TELEM_EVT_LEN(x) (sizeof(x)/sizeof((x)[0]))
-
 #define TELEM_DEBUGFS_CPU(model, data) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data}
 
@@ -304,13 +300,13 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
        .ioss_d0ix_data = telem_apl_ioss_d0ix_data,
        .ioss_pg_data = telem_apl_ioss_pg_data,
 
-       .pss_idle_evts = TELEM_EVT_LEN(telem_apl_pss_idle_data),
-       .pcs_idle_blkd_evts = TELEM_EVT_LEN(telem_apl_pcs_idle_blkd_data),
-       .pcs_s0ix_blkd_evts = TELEM_EVT_LEN(telem_apl_pcs_s0ix_blkd_data),
-       .pss_ltr_evts = TELEM_EVT_LEN(telem_apl_pss_ltr_data),
-       .pss_wakeup_evts = TELEM_EVT_LEN(telem_apl_pss_wakeup),
-       .ioss_d0ix_evts = TELEM_EVT_LEN(telem_apl_ioss_d0ix_data),
-       .ioss_pg_evts = TELEM_EVT_LEN(telem_apl_ioss_pg_data),
+       .pss_idle_evts = ARRAY_SIZE(telem_apl_pss_idle_data),
+       .pcs_idle_blkd_evts = ARRAY_SIZE(telem_apl_pcs_idle_blkd_data),
+       .pcs_s0ix_blkd_evts = ARRAY_SIZE(telem_apl_pcs_s0ix_blkd_data),
+       .pss_ltr_evts = ARRAY_SIZE(telem_apl_pss_ltr_data),
+       .pss_wakeup_evts = ARRAY_SIZE(telem_apl_pss_wakeup),
+       .ioss_d0ix_evts = ARRAY_SIZE(telem_apl_ioss_d0ix_data),
+       .ioss_pg_evts = ARRAY_SIZE(telem_apl_ioss_pg_data),
 
        .pstates_id = TELEM_APL_PSS_PSTATES_ID,
        .pss_idle_id = TELEM_APL_PSS_IDLE_ID,
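The TELEM_EVT_LEN removal above swaps a local macro for the generic ARRAY_SIZE() from <linux/kernel.h>, which computes the same value but also refuses to compile when handed a pointer instead of an array:

/* local macro removed above */
#define TELEM_EVT_LEN(x)  (sizeof(x) / sizeof((x)[0]))
/* generic replacement in <linux/kernel.h>, plus a compile-time array check */
#define ARRAY_SIZE(arr)   (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))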
index e0424d5a795a5e0a5baa969546df60252c9a230e..2f889d6c270e85c50fd8296af3636cc216e84ed9 100644 (file)
  * It used the PUNIT and PMC IPC interfaces for configuring the counters.
  * The accumulated results are fetched from SRAM.
  */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
+
 #include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/pci.h>
-#include <linux/suspend.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 
 #include <asm/cpu_device_id.h>
@@ -256,7 +250,7 @@ static int telemetry_check_evtid(enum telemetry_unit telem_unit,
                break;
 
        default:
-               pr_err("Unknown Telemetry action Specified %d\n", action);
+               pr_err("Unknown Telemetry action specified %d\n", action);
                return -EINVAL;
        }
 
@@ -659,7 +653,7 @@ static int telemetry_setup(struct platform_device *pdev)
        ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
                                        TELEM_RESET);
        if (ret) {
-               dev_err(&pdev->dev, "TELEMTRY Setup Failed\n");
+               dev_err(&pdev->dev, "TELEMETRY Setup Failed\n");
                return ret;
        }
        return 0;
@@ -685,7 +679,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
        ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
                                        TELEM_UPDATE);
        if (ret)
-               pr_err("TELEMTRY Config Failed\n");
+               pr_err("TELEMETRY Config Failed\n");
 
        return ret;
 }
@@ -822,7 +816,7 @@ static int telemetry_plt_reset_events(void)
        ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
                                        TELEM_RESET);
        if (ret)
-               pr_err("TELEMTRY Reset Failed\n");
+               pr_err("TELEMETRY Reset Failed\n");
 
        return ret;
 }
@@ -885,7 +879,7 @@ static int telemetry_plt_add_events(u8 num_pss_evts, u8 num_ioss_evts,
        ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
                                        TELEM_ADD);
        if (ret)
-               pr_err("TELEMTRY ADD Failed\n");
+               pr_err("TELEMETRY ADD Failed\n");
 
        return ret;
 }
@@ -1195,7 +1189,7 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
 
        ret = telemetry_set_pltdata(&telm_pltops, telm_conf);
        if (ret) {
-               dev_err(&pdev->dev, "TELEMTRY Set Pltops Failed.\n");
+               dev_err(&pdev->dev, "TELEMETRY Set Pltops Failed.\n");
                goto out;
        }
 
@@ -1210,7 +1204,7 @@ out:
                iounmap(telm_conf->pss_config.regmap);
        if (telm_conf->ioss_config.regmap)
                iounmap(telm_conf->ioss_config.regmap);
-       dev_err(&pdev->dev, "TELEMTRY Setup Failed.\n");
+       dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
 
        return ret;
 }
@@ -1234,7 +1228,6 @@ static struct platform_driver telemetry_soc_driver = {
 
 static int __init telemetry_module_init(void)
 {
-       pr_info(DRIVER_NAME ": version %s loaded\n", DRIVER_VERSION);
        return platform_driver_register(&telemetry_soc_driver);
 }
 
index 4f60d8e32a0a538b9632d809d5e52186007d1db3..d4ea01805879b4c9049b1657fad6f6ddb9de27d9 100644 (file)
@@ -125,6 +125,7 @@ static int itmt_legacy_cpu_online(unsigned int cpu)
 
 static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
        ICPU(INTEL_FAM6_BROADWELL_X),
+       ICPU(INTEL_FAM6_SKYLAKE_X),
        {}
 };
 
index 4f3de2a8c4dfe4fd594a44d9fa9081a9157b9069..504256c3660d84e15444d265ca1fd9747f4fc0dc 100644 (file)
@@ -216,8 +216,8 @@ static struct resource mlxplat_mlxcpld_resources[] = {
        [0] = DEFINE_RES_IRQ_NAMED(17, "mlxcpld-hotplug"),
 };
 
-struct platform_device *mlxplat_dev;
-struct mlxcpld_hotplug_platform_data *mlxplat_hotplug;
+static struct platform_device *mlxplat_dev;
+static struct mlxcpld_hotplug_platform_data *mlxplat_hotplug;
 
 static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
 {
index bc98ef95514a1770c285ed54b4913d7c41ff5ad6..9b9e1f39bbfbb8a75a643783072080bc10b10735 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <linux/input-polldev.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -64,8 +65,23 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
        }
 }
 
+/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
+static const struct dmi_system_id peaq_dmi_table[] __initconst = {
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+               },
+       },
+       {}
+};
+
 static int __init peaq_wmi_init(void)
 {
+       /* WMI GUID is not unique, also check for a DMI match */
+       if (!dmi_check_system(peaq_dmi_table))
+               return -ENODEV;
+
        if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
                return -ENODEV;
 
@@ -86,9 +102,6 @@ static int __init peaq_wmi_init(void)
 
 static void __exit peaq_wmi_exit(void)
 {
-       if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
-               return;
-
        input_unregister_polled_device(peaq_poll_dev);
 }
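
The new DMI table above is needed because the WMI GUID alone cannot identify the PEAQ hardware. The same gating idiom, reduced to a self-contained sketch with hypothetical vendor and product strings:

#include <linux/dmi.h>
#include <linux/module.h>

/*
 * Hypothetical allow-list.  Every DMI_MATCH entry inside one element must
 * match (as a substring) for dmi_check_system() to count that element.
 */
static const struct dmi_system_id example_dmi_table[] __initconst = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},
	{}
};

static int __init example_init(void)
{
	/* Refuse to load on machines that are not on the allow-list. */
	if (!dmi_check_system(example_dmi_table))
		return -ENODEV;

	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");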
 
index 1157a7b646d66c17682e985eb22d81e6bea7ab45..266535c2a72f21dc7309962542bd71b68fe9b98f 100644 (file)
@@ -58,6 +58,7 @@ static const struct property_entry dexp_ursus_7w_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
 
@@ -72,6 +73,7 @@ static const struct property_entry surftab_wintron70_st70416_6_props[] = {
        PROPERTY_ENTRY_STRING("firmware-name",
                              "gsl1686-surftab-wintron70-st70416-6.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
 
@@ -83,6 +85,8 @@ static const struct silead_ts_dmi_data surftab_wintron70_st70416_6_data = {
 static const struct property_entry gp_electronic_t701_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
        PROPERTY_ENTRY_STRING("firmware-name",
                              "gsl1680-gp-electronic-t701.fw"),
        { }
@@ -114,6 +118,7 @@ static const struct property_entry pov_mobii_wintab_p800w_props[] = {
        PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
        PROPERTY_ENTRY_STRING("firmware-name",
                              "gsl3692-pov-mobii-wintab-p800w.fw"),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
        { }
 };
 
@@ -136,6 +141,36 @@ static const struct silead_ts_dmi_data itworks_tw891_data = {
        .properties     = itworks_tw891_props,
 };
 
+static const struct property_entry chuwi_hi8_pro_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct silead_ts_dmi_data chuwi_hi8_pro_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = chuwi_hi8_pro_props,
+};
+
+static const struct property_entry digma_citi_e200_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_STRING("firmware-name",
+                             "gsl1686-digma_citi_e200.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct silead_ts_dmi_data digma_citi_e200_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = digma_citi_e200_props,
+};
+
 static const struct dmi_system_id silead_ts_dmi_table[] = {
        {
                /* CUBE iwork8 Air */
@@ -219,6 +254,23 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "TW891"),
                },
        },
+       {
+               /* Chuwi Hi8 Pro */
+               .driver_data = (void *)&chuwi_hi8_pro_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"),
+               },
+       },
+       {
+               /* Digma Citi E200 */
+               .driver_data = (void *)&digma_citi_e200_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Digma"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CITI E200"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+               },
+       },
        { },
 };
 
index a16cea2be9c34a0c83d17a6ce7a35a1704956682..935121814c97711a4d8879c36c80361ab5462967 100644 (file)
@@ -363,7 +363,7 @@ static int sony_laptop_input_keycode_map[] = {
 };
 
 /* release buttons after a short delay if pressed */
-static void do_sony_laptop_release_key(unsigned long unused)
+static void do_sony_laptop_release_key(struct timer_list *unused)
 {
        struct sony_laptop_keypress kp;
        unsigned long flags;
@@ -470,7 +470,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
                goto err_dec_users;
        }
 
-       setup_timer(&sony_laptop_input.release_key_timer,
+       timer_setup(&sony_laptop_input.release_key_timer,
                    do_sony_laptop_release_key, 0);
 
        /* input keys */
@@ -1627,7 +1627,7 @@ static const struct rfkill_ops sony_rfkill_ops = {
 static int sony_nc_setup_rfkill(struct acpi_device *device,
                                enum sony_nc_rfkill nc_type)
 {
-       int err = 0;
+       int err;
        struct rfkill *rfk;
        enum rfkill_type type;
        const char *name;
@@ -1660,17 +1660,19 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
        if (!rfk)
                return -ENOMEM;
 
-       if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
+       err = sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+       if (err < 0) {
                rfkill_destroy(rfk);
-               return -1;
+               return err;
        }
        hwblock = !(result & 0x1);
 
-       if (sony_call_snc_handle(sony_rfkill_handle,
-                               sony_rfkill_address[nc_type],
-                               &result) < 0) {
+       err = sony_call_snc_handle(sony_rfkill_handle,
+                                  sony_rfkill_address[nc_type],
+                                  &result);
+       if (err < 0) {
                rfkill_destroy(rfk);
-               return -1;
+               return err;
        }
        swblock = !(result & 0x2);
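
These sony-laptop hunks are part of the tree-wide conversion from setup_timer() with an unsigned long cookie to timer_setup() with a struct timer_list * callback argument. A minimal standalone sketch of the converted pattern, with hypothetical names, using the standard from_timer() helper to recover the containing structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_state {
	struct timer_list release_timer;
	/* ... driver data ... */
};

static void example_timeout(struct timer_list *t)
{
	/* from_timer() maps the timer_list back to its container. */
	struct example_state *st = from_timer(st, t, release_timer);

	(void)st;	/* act on the driver state here */
}

static void example_arm(struct example_state *st)
{
	timer_setup(&st->release_timer, example_timeout, 0);
	mod_timer(&st->release_timer, jiffies + msecs_to_jiffies(100));
}

The sony-laptop callback above does not need its argument, so it simply takes struct timer_list *unused instead of calling from_timer().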
 
index 3887dfeafc964522e43c077534bf62e0747f7dcf..117be48ff4de9a3a70df898565cf6495e92d7ddd 100644 (file)
@@ -310,8 +310,7 @@ static struct {
        enum {
                TP_HOTKEY_TABLET_NONE = 0,
                TP_HOTKEY_TABLET_USES_MHKG,
-               /* X1 Yoga 2016, seen on BIOS N1FET44W */
-               TP_HOTKEY_TABLET_USES_CMMD,
+               TP_HOTKEY_TABLET_USES_GMMS,
        } hotkey_tablet;
        u32 kbdlight:1;
        u32 light:1;
@@ -2044,8 +2043,28 @@ static void hotkey_poll_setup(const bool may_warn);
 
 /* HKEY.MHKG() return bits */
 #define TP_HOTKEY_TABLET_MASK (1 << 3)
-/* ThinkPad X1 Yoga (2016) */
-#define TP_EC_CMMD_TABLET_MODE 0x6
+enum {
+       TP_ACPI_MULTI_MODE_INVALID      = 0,
+       TP_ACPI_MULTI_MODE_UNKNOWN      = 1 << 0,
+       TP_ACPI_MULTI_MODE_LAPTOP       = 1 << 1,
+       TP_ACPI_MULTI_MODE_TABLET       = 1 << 2,
+       TP_ACPI_MULTI_MODE_FLAT         = 1 << 3,
+       TP_ACPI_MULTI_MODE_STAND        = 1 << 4,
+       TP_ACPI_MULTI_MODE_TENT         = 1 << 5,
+       TP_ACPI_MULTI_MODE_STAND_TENT   = 1 << 6,
+};
+
+enum {
+       /* The following modes are considered tablet mode for the purpose of
+        * reporting the status to userspace. i.e. in all these modes it makes
+        * sense to disable the laptop input devices such as touchpad and
+        * keyboard.
+        */
+       TP_ACPI_MULTI_MODE_TABLET_LIKE  = TP_ACPI_MULTI_MODE_TABLET |
+                                         TP_ACPI_MULTI_MODE_STAND |
+                                         TP_ACPI_MULTI_MODE_TENT |
+                                         TP_ACPI_MULTI_MODE_STAND_TENT,
+};
 
 static int hotkey_get_wlsw(void)
 {
@@ -2066,6 +2085,90 @@ static int hotkey_get_wlsw(void)
        return (status) ? TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
 }
 
+static int hotkey_gmms_get_tablet_mode(int s, int *has_tablet_mode)
+{
+       int type = (s >> 16) & 0xffff;
+       int value = s & 0xffff;
+       int mode = TP_ACPI_MULTI_MODE_INVALID;
+       int valid_modes = 0;
+
+       if (has_tablet_mode)
+               *has_tablet_mode = 0;
+
+       switch (type) {
+       case 1:
+               valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+                             TP_ACPI_MULTI_MODE_TABLET |
+                             TP_ACPI_MULTI_MODE_STAND_TENT;
+               break;
+       case 2:
+               valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+                             TP_ACPI_MULTI_MODE_FLAT |
+                             TP_ACPI_MULTI_MODE_TABLET |
+                             TP_ACPI_MULTI_MODE_STAND |
+                             TP_ACPI_MULTI_MODE_TENT;
+               break;
+       case 3:
+               valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+                             TP_ACPI_MULTI_MODE_FLAT;
+               break;
+       case 4:
+               valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+                             TP_ACPI_MULTI_MODE_TABLET |
+                             TP_ACPI_MULTI_MODE_STAND |
+                             TP_ACPI_MULTI_MODE_TENT;
+               break;
+       case 5:
+               valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+                             TP_ACPI_MULTI_MODE_FLAT |
+                             TP_ACPI_MULTI_MODE_TABLET |
+                             TP_ACPI_MULTI_MODE_STAND |
+                             TP_ACPI_MULTI_MODE_TENT;
+               break;
+       default:
+               pr_err("Unknown multi mode status type %d with value 0x%04X, please report this to %s\n",
+                      type, value, TPACPI_MAIL);
+               return 0;
+       }
+
+       if (has_tablet_mode && (valid_modes & TP_ACPI_MULTI_MODE_TABLET_LIKE))
+               *has_tablet_mode = 1;
+
+       switch (value) {
+       case 1:
+               mode = TP_ACPI_MULTI_MODE_LAPTOP;
+               break;
+       case 2:
+               mode = TP_ACPI_MULTI_MODE_FLAT;
+               break;
+       case 3:
+               mode = TP_ACPI_MULTI_MODE_TABLET;
+               break;
+       case 4:
+               if (type == 1)
+                       mode = TP_ACPI_MULTI_MODE_STAND_TENT;
+               else
+                       mode = TP_ACPI_MULTI_MODE_STAND;
+               break;
+       case 5:
+               mode = TP_ACPI_MULTI_MODE_TENT;
+               break;
+       default:
+               if (type == 5 && value == 0xffff) {
+                       pr_warn("Multi mode status is undetected, assuming laptop\n");
+                       return 0;
+               }
+       }
+
+       if (!(mode & valid_modes)) {
+               pr_err("Unknown/reserved multi mode value 0x%04X for type %d, please report this to %s\n",
+                      value, type, TPACPI_MAIL);
+               return 0;
+       }
+
+       return !!(mode & TP_ACPI_MULTI_MODE_TABLET_LIKE);
+}
+
 static int hotkey_get_tablet_mode(int *status)
 {
        int s;
@@ -2077,11 +2180,11 @@ static int hotkey_get_tablet_mode(int *status)
 
                *status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
                break;
-       case TP_HOTKEY_TABLET_USES_CMMD:
-               if (!acpi_evalf(ec_handle, &s, "CMMD", "d"))
+       case TP_HOTKEY_TABLET_USES_GMMS:
+               if (!acpi_evalf(hkey_handle, &s, "GMMS", "dd", 0))
                        return -EIO;
 
-               *status = (s == TP_EC_CMMD_TABLET_MODE);
+               *status = hotkey_gmms_get_tablet_mode(s, NULL);
                break;
        default:
                break;
@@ -3113,16 +3216,19 @@ static int hotkey_init_tablet_mode(void)
        int in_tablet_mode = 0, res;
        char *type = NULL;
 
-       if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+       if (acpi_evalf(hkey_handle, &res, "GMMS", "qdd", 0)) {
+               int has_tablet_mode;
+
+               in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
+                                                            &has_tablet_mode);
+               if (has_tablet_mode)
+                       tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
+               type = "GMMS";
+       } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
                /* For X41t, X60t, X61t Tablets... */
                tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG;
                in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK);
                type = "MHKG";
-       } else if (acpi_evalf(ec_handle, &res, "CMMD", "qd")) {
-               /* For X1 Yoga (2016) */
-               tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_CMMD;
-               in_tablet_mode = res == TP_EC_CMMD_TABLET_MODE;
-               type = "CMMD";
        }
 
        if (!tp_features.hotkey_tablet)
index 0765b1797d4c0d35cd58f588d8c4717bf6b1179d..791449a2370f4f10af698dbaf27bb3749d6d263a 100644 (file)
 
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/acpi.h>
 #include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/acpi.h>
-#include <linux/slab.h>
+#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/wmi.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
 #include <linux/uuid.h>
+#include <linux/wmi.h>
+#include <uapi/linux/wmi.h>
 
 ACPI_MODULE_NAME("wmi");
 MODULE_AUTHOR("Carlos Corbacho");
@@ -69,9 +72,12 @@ struct wmi_block {
        struct wmi_device dev;
        struct list_head list;
        struct guid_block gblock;
+       struct miscdevice char_dev;
+       struct mutex char_mutex;
        struct acpi_device *acpi_device;
        wmi_notify_handler handler;
        void *handler_data;
+       u64 req_buf_size;
 
        bool read_takes_no_args;
 };
@@ -188,6 +194,25 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
 /*
  * Exported WMI functions
  */
+
+/**
+ * set_required_buffer_size - Sets the buffer size needed for performing IOCTL
+ * @wdev: A wmi bus device from a driver
+ * @length: Required size, in bytes, of the driver's ioctl data buffer
+ *
+ * Stores the required buffer size so it can be validated and allocated later
+ */
+int set_required_buffer_size(struct wmi_device *wdev, u64 length)
+{
+       struct wmi_block *wblock;
+
+       wblock = container_of(wdev, struct wmi_block, dev);
+       wblock->req_buf_size = length;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(set_required_buffer_size);
+
 /**
  * wmi_evaluate_method - Evaluate a WMI method
  * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
@@ -200,6 +225,28 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
  */
 acpi_status wmi_evaluate_method(const char *guid_string, u8 instance,
 u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+{
+       struct wmi_block *wblock = NULL;
+
+       if (!find_guid(guid_string, &wblock))
+               return AE_ERROR;
+       return wmidev_evaluate_method(&wblock->dev, instance, method_id,
+                                     in, out);
+}
+EXPORT_SYMBOL_GPL(wmi_evaluate_method);
+
+/**
+ * wmidev_evaluate_method - Evaluate a WMI method
+ * @wdev: A wmi bus device from a driver
+ * @instance: Instance index
+ * @method_id: Method ID to call
+ * @in: Buffer containing input for the method call
+ * @out: Empty buffer to return the method results
+ *
+ * Call an ACPI-WMI method
+ */
+acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance,
+       u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
 {
        struct guid_block *block = NULL;
        struct wmi_block *wblock = NULL;
@@ -209,9 +256,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
        union acpi_object params[3];
        char method[5] = "WM";
 
-       if (!find_guid(guid_string, &wblock))
-               return AE_ERROR;
-
+       wblock = container_of(wdev, struct wmi_block, dev);
        block = &wblock->gblock;
        handle = wblock->acpi_device->handle;
 
@@ -246,7 +291,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
 
        return status;
 }
-EXPORT_SYMBOL_GPL(wmi_evaluate_method);
+EXPORT_SYMBOL_GPL(wmidev_evaluate_method);
 
 static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
                                 struct acpi_buffer *out)
@@ -348,23 +393,6 @@ union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance)
 }
 EXPORT_SYMBOL_GPL(wmidev_block_query);
 
-struct wmi_device *wmidev_get_other_guid(struct wmi_device *wdev,
-                                        const char *guid_string)
-{
-       struct wmi_block *this_wb = container_of(wdev, struct wmi_block, dev);
-       struct wmi_block *other_wb;
-
-       if (!find_guid(guid_string, &other_wb))
-               return NULL;
-
-       if (other_wb->acpi_device != this_wb->acpi_device)
-               return NULL;
-
-       get_device(&other_wb->dev.dev);
-       return &other_wb->dev;
-}
-EXPORT_SYMBOL_GPL(wmidev_get_other_guid);
-
 /**
  * wmi_set_block - Write to a WMI block
  * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
@@ -761,6 +789,113 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
 
        return 0;
 }
+static int wmi_char_open(struct inode *inode, struct file *filp)
+{
+       const char *driver_name = filp->f_path.dentry->d_iname;
+       struct wmi_block *wblock = NULL;
+       struct wmi_block *next = NULL;
+
+       list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+               if (!wblock->dev.dev.driver)
+                       continue;
+               if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
+                       filp->private_data = wblock;
+                       break;
+               }
+       }
+
+       if (!filp->private_data)
+               return -ENODEV;
+
+       return nonseekable_open(inode, filp);
+}
+
+static ssize_t wmi_char_read(struct file *filp, char __user *buffer,
+       size_t length, loff_t *offset)
+{
+       struct wmi_block *wblock = filp->private_data;
+
+       return simple_read_from_buffer(buffer, length, offset,
+                                      &wblock->req_buf_size,
+                                      sizeof(wblock->req_buf_size));
+}
+
+static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       struct wmi_ioctl_buffer __user *input =
+               (struct wmi_ioctl_buffer __user *) arg;
+       struct wmi_block *wblock = filp->private_data;
+       struct wmi_ioctl_buffer *buf = NULL;
+       struct wmi_driver *wdriver = NULL;
+       int ret;
+
+       if (_IOC_TYPE(cmd) != WMI_IOC)
+               return -ENOTTY;
+
+       /* make sure we're not calling a higher instance than exists*/
+       if (_IOC_NR(cmd) >= wblock->gblock.instance_count)
+               return -EINVAL;
+
+       mutex_lock(&wblock->char_mutex);
+       buf = wblock->handler_data;
+       if (get_user(buf->length, &input->length)) {
+               dev_dbg(&wblock->dev.dev, "Read length from user failed\n");
+               ret = -EFAULT;
+               goto out_ioctl;
+       }
+       /* if it's too small, abort */
+       if (buf->length < wblock->req_buf_size) {
+               dev_err(&wblock->dev.dev,
+                       "Buffer %lld too small, need at least %lld\n",
+                       buf->length, wblock->req_buf_size);
+               ret = -EINVAL;
+               goto out_ioctl;
+       }
+       /* if it's too big, warn, driver will only use what is needed */
+       if (buf->length > wblock->req_buf_size)
+               dev_warn(&wblock->dev.dev,
+                       "Buffer %lld is bigger than required %lld\n",
+                       buf->length, wblock->req_buf_size);
+
+       /* copy the structure from userspace */
+       if (copy_from_user(buf, input, wblock->req_buf_size)) {
+               dev_dbg(&wblock->dev.dev, "Copy %llu from user failed\n",
+                       wblock->req_buf_size);
+               ret = -EFAULT;
+               goto out_ioctl;
+       }
+
+       /* let the driver do any filtering and do the call */
+       wdriver = container_of(wblock->dev.dev.driver,
+                              struct wmi_driver, driver);
+       if (!try_module_get(wdriver->driver.owner)) {
+               ret = -EBUSY;
+               goto out_ioctl;
+       }
+       ret = wdriver->filter_callback(&wblock->dev, cmd, buf);
+       module_put(wdriver->driver.owner);
+       if (ret)
+               goto out_ioctl;
+
+       /* return the result (only up to our internal buffer size) */
+       if (copy_to_user(input, buf, wblock->req_buf_size)) {
+               dev_dbg(&wblock->dev.dev, "Copy %llu to user failed\n",
+                       wblock->req_buf_size);
+               ret = -EFAULT;
+       }
+
+out_ioctl:
+       mutex_unlock(&wblock->char_mutex);
+       return ret;
+}
+
+static const struct file_operations wmi_fops = {
+       .owner          = THIS_MODULE,
+       .read           = wmi_char_read,
+       .open           = wmi_char_open,
+       .unlocked_ioctl = wmi_ioctl,
+       .compat_ioctl   = wmi_ioctl,
+};
 
 static int wmi_dev_probe(struct device *dev)
 {
@@ -768,16 +903,63 @@ static int wmi_dev_probe(struct device *dev)
        struct wmi_driver *wdriver =
                container_of(dev->driver, struct wmi_driver, driver);
        int ret = 0;
+       int count;
+       char *buf;
 
        if (ACPI_FAILURE(wmi_method_enable(wblock, 1)))
                dev_warn(dev, "failed to enable device -- probing anyway\n");
 
        if (wdriver->probe) {
                ret = wdriver->probe(dev_to_wdev(dev));
-               if (ret != 0 && ACPI_FAILURE(wmi_method_enable(wblock, 0)))
-                       dev_warn(dev, "failed to disable device\n");
+               if (ret != 0)
+                       goto probe_failure;
+       }
+
+       /* driver wants a character device made */
+       if (wdriver->filter_callback) {
+               /* check that required buffer size declared by driver or MOF */
+               if (!wblock->req_buf_size) {
+                       dev_err(&wblock->dev.dev,
+                               "Required buffer size not set\n");
+                       ret = -EINVAL;
+                       goto probe_failure;
+               }
+
+               count = get_order(wblock->req_buf_size);
+               wblock->handler_data = (void *)__get_free_pages(GFP_KERNEL,
+                                                               count);
+               if (!wblock->handler_data) {
+                       ret = -ENOMEM;
+                       goto probe_failure;
+               }
+
+               buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL);
+               if (!buf) {
+                       ret = -ENOMEM;
+                       goto probe_string_failure;
+               }
+               sprintf(buf, "wmi/%s", wdriver->driver.name);
+               wblock->char_dev.minor = MISC_DYNAMIC_MINOR;
+               wblock->char_dev.name = buf;
+               wblock->char_dev.fops = &wmi_fops;
+               wblock->char_dev.mode = 0444;
+               ret = misc_register(&wblock->char_dev);
+               if (ret) {
+                       dev_warn(dev, "failed to register char dev: %d", ret);
+                       ret = -ENOMEM;
+                       goto probe_misc_failure;
+               }
        }
 
+       return 0;
+
+probe_misc_failure:
+       kfree(buf);
+probe_string_failure:
+       kfree(wblock->handler_data);
+probe_failure:
+       if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
+               dev_warn(dev, "failed to disable device\n");
        return ret;
 }
 
@@ -788,6 +970,13 @@ static int wmi_dev_remove(struct device *dev)
                container_of(dev->driver, struct wmi_driver, driver);
        int ret = 0;
 
+       if (wdriver->filter_callback) {
+               misc_deregister(&wblock->char_dev);
+               kfree(wblock->char_dev.name);
+               free_pages((unsigned long)wblock->handler_data,
+                          get_order(wblock->req_buf_size));
+       }
+
        if (wdriver->remove)
                ret = wdriver->remove(dev_to_wdev(dev));
 
@@ -844,6 +1033,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
 
        if (gblock->flags & ACPI_WMI_METHOD) {
                wblock->dev.dev.type = &wmi_type_method;
+               mutex_init(&wblock->char_mutex);
                goto out_init;
        }
 
@@ -1145,7 +1335,7 @@ static int acpi_wmi_remove(struct platform_device *device)
        acpi_remove_address_space_handler(acpi_device->handle,
                                ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
        wmi_free_devices(acpi_device);
-       device_unregister((struct device *)dev_get_drvdata(&device->dev));
+       device_destroy(&wmi_bus_class, MKDEV(0, 0));
 
        return 0;
 }
@@ -1199,7 +1389,7 @@ static int acpi_wmi_probe(struct platform_device *device)
        return 0;
 
 err_remove_busdev:
-       device_unregister(wmi_bus_dev);
+       device_destroy(&wmi_bus_class, MKDEV(0, 0));
 
 err_remove_notify_handler:
        acpi_remove_notify_handler(acpi_device->handle, ACPI_DEVICE_NOTIFY,
@@ -1264,8 +1454,8 @@ err_unreg_class:
 static void __exit acpi_wmi_exit(void)
 {
        platform_driver_unregister(&acpi_wmi_driver);
-       class_unregister(&wmi_bus_class);
        bus_unregister(&wmi_bus_type);
+       class_unregister(&wmi_bus_class);
 }
 
 subsys_initcall(acpi_wmi_init);
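
The bulk of this file's changes add a per-driver misc character device: a driver declares how large an ioctl buffer userspace must supply, and the bus validates and routes ioctl buffers to the driver's filter_callback. A hypothetical driver using these hooks might look like the sketch below; the filter_callback prototype and the wmi_device_id GUID string are assumptions based on the linux/wmi.h changes in this series, which are not shown here.

#include <linux/module.h>
#include <linux/wmi.h>
#include <uapi/linux/wmi.h>

#define EXAMPLE_WMI_GUID "ABCDEFAB-1234-5678-9ABC-DEF012345678"	/* hypothetical */

static int example_wmi_probe(struct wmi_device *wdev)
{
	/* Declare how large an ioctl buffer userspace must supply. */
	return set_required_buffer_size(wdev, 4096);
}

/* Prototype assumed from the linux/wmi.h changes in this series. */
static long example_wmi_filter(struct wmi_device *wdev, unsigned int cmd,
			       struct wmi_ioctl_buffer *arg)
{
	/*
	 * Validate the request here, then forward it to firmware, e.g. with
	 * wmidev_evaluate_method(wdev, ...).  Returning 0 lets the bus copy
	 * the (possibly updated) buffer back to userspace.
	 */
	return 0;
}

static const struct wmi_device_id example_wmi_id_table[] = {
	{ .guid_string = EXAMPLE_WMI_GUID },
	{ },
};

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-wmi",
	},
	.id_table = example_wmi_id_table,
	.probe = example_wmi_probe,
	.filter_callback = example_wmi_filter,
};

static int __init example_wmi_init(void)
{
	return wmi_driver_register(&example_wmi_driver);
}
module_init(example_wmi_init);

static void __exit example_wmi_exit(void)
{
	wmi_driver_unregister(&example_wmi_driver);
}
module_exit(example_wmi_exit);

MODULE_LICENSE("GPL");

With the driver bound, the misc device should appear as /dev/wmi/example-wmi; reading it returns the u64 required buffer size, and ioctls of type WMI_IOC are routed through the filter.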
index 436b4e4e71a149384c1246baa2a508e4dce21f72..04735649052ab3652d38c8ce775d033fb8cde7ac 100644 (file)
@@ -39,7 +39,7 @@ static struct timer_list ktimer;
  * The kernel timer
  */
 
-static void pps_ktimer_event(unsigned long ptr)
+static void pps_ktimer_event(struct timer_list *unused)
 {
        struct pps_event_time ts;
 
@@ -85,7 +85,7 @@ static int __init pps_ktimer_init(void)
                return -ENOMEM;
        }
 
-       setup_timer(&ktimer, pps_ktimer_event, 0);
+       timer_setup(&ktimer, pps_ktimer_event, 0);
        mod_timer(&ktimer, jiffies + HZ);
 
        dev_info(pps->dev, "ktimer PPS source registered\n");
index 75db585a2a9486e354c08cf971b46507f014c3d4..acd3ce8ecf3f134bb9c502687c5e5600ddec87c7 100644 (file)
@@ -37,11 +37,20 @@ struct atmel_tcb_pwm_device {
        unsigned period;                /* PWM period expressed in clk cycles */
 };
 
+struct atmel_tcb_channel {
+       u32 enabled;
+       u32 cmr;
+       u32 ra;
+       u32 rb;
+       u32 rc;
+};
+
 struct atmel_tcb_pwm_chip {
        struct pwm_chip chip;
        spinlock_t lock;
        struct atmel_tc *tc;
        struct atmel_tcb_pwm_device *pwms[NPWM];
+       struct atmel_tcb_channel bkup[NPWM / 2];
 };
 
 static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
@@ -175,12 +184,15 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
         * Use software trigger to apply the new setting.
         * If both PWM devices in this group are disabled we stop the clock.
         */
-       if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC)))
+       if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) {
                __raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
                             regs + ATMEL_TC_REG(group, CCR));
-       else
+               tcbpwmc->bkup[group].enabled = 1;
+       } else {
                __raw_writel(ATMEL_TC_SWTRG, regs +
                             ATMEL_TC_REG(group, CCR));
+               tcbpwmc->bkup[group].enabled = 0;
+       }
 
        spin_unlock(&tcbpwmc->lock);
 }
@@ -263,6 +275,7 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
        /* Use software trigger to apply the new setting */
        __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
                     regs + ATMEL_TC_REG(group, CCR));
+       tcbpwmc->bkup[group].enabled = 1;
        spin_unlock(&tcbpwmc->lock);
        return 0;
 }
@@ -445,10 +458,56 @@ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int atmel_tcb_pwm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+       void __iomem *base = tcbpwm->tc->regs;
+       int i;
+
+       for (i = 0; i < (NPWM / 2); i++) {
+               struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+               chan->cmr = readl(base + ATMEL_TC_REG(i, CMR));
+               chan->ra = readl(base + ATMEL_TC_REG(i, RA));
+               chan->rb = readl(base + ATMEL_TC_REG(i, RB));
+               chan->rc = readl(base + ATMEL_TC_REG(i, RC));
+       }
+       return 0;
+}
+
+static int atmel_tcb_pwm_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+       void __iomem *base = tcbpwm->tc->regs;
+       int i;
+
+       for (i = 0; i < (NPWM / 2); i++) {
+               struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+               writel(chan->cmr, base + ATMEL_TC_REG(i, CMR));
+               writel(chan->ra, base + ATMEL_TC_REG(i, RA));
+               writel(chan->rb, base + ATMEL_TC_REG(i, RB));
+               writel(chan->rc, base + ATMEL_TC_REG(i, RC));
+               if (chan->enabled) {
+                       writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+                               base + ATMEL_TC_REG(i, CCR));
+               }
+       }
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
+                        atmel_tcb_pwm_resume);
+
 static struct platform_driver atmel_tcb_pwm_driver = {
        .driver = {
                .name = "atmel-tcb-pwm",
                .of_match_table = atmel_tcb_pwm_dt_ids,
+               .pm = &atmel_tcb_pwm_pm_ops,
        },
        .probe = atmel_tcb_pwm_probe,
        .remove = atmel_tcb_pwm_remove,
index 2fb30deee3457017a22c1cea394cfd2e6e1cab77..815f5333bb8f98dea06e5d4f1969e8f96933b815 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/pwm.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
@@ -39,6 +40,8 @@
 #define PERIP_PWM_PDM_CONTROL_CH_MASK          0x1
 #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch)     ((ch) * 4)
 
+#define IMG_PWM_PM_TIMEOUT                     1000 /* ms */
+
 /*
  * PWM period is specified with a timebase register,
  * in number of step periods. The PWM duty cycle is also
@@ -52,6 +55,8 @@
  */
 #define MIN_TMBASE_STEPS                       16
 
+#define IMG_PWM_NPWM                           4
+
 struct img_pwm_soc_data {
        u32 max_timebase;
 };
@@ -66,6 +71,8 @@ struct img_pwm_chip {
        int             max_period_ns;
        int             min_period_ns;
        const struct img_pwm_soc_data   *data;
+       u32             suspend_ctrl_cfg;
+       u32             suspend_ch_cfg[IMG_PWM_NPWM];
 };
 
 static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -92,6 +99,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        unsigned long mul, output_clk_hz, input_clk_hz;
        struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
        unsigned int max_timebase = pwm_chip->data->max_timebase;
+       int ret;
 
        if (period_ns < pwm_chip->min_period_ns ||
            period_ns > pwm_chip->max_period_ns) {
@@ -123,6 +131,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 
        duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
 
+       ret = pm_runtime_get_sync(chip->dev);
+       if (ret < 0)
+               return ret;
+
        val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
        val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
        val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
@@ -133,6 +145,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
              (timebase << PWM_CH_CFG_TMBASE_SHIFT);
        img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
 
+       pm_runtime_mark_last_busy(chip->dev);
+       pm_runtime_put_autosuspend(chip->dev);
+
        return 0;
 }
 
@@ -140,6 +155,11 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        u32 val;
        struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+       int ret;
+
+       ret = pm_runtime_get_sync(chip->dev);
+       if (ret < 0)
+               return ret;
 
        val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
        val |= BIT(pwm->hwpwm);
@@ -160,6 +180,9 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
        val &= ~BIT(pwm->hwpwm);
        img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+       pm_runtime_mark_last_busy(chip->dev);
+       pm_runtime_put_autosuspend(chip->dev);
 }
 
 static const struct pwm_ops img_pwm_ops = {
@@ -182,6 +205,37 @@ static const struct of_device_id img_pwm_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, img_pwm_of_match);
 
+static int img_pwm_runtime_suspend(struct device *dev)
+{
+       struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(pwm_chip->pwm_clk);
+       clk_disable_unprepare(pwm_chip->sys_clk);
+
+       return 0;
+}
+
+static int img_pwm_runtime_resume(struct device *dev)
+{
+       struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare_enable(pwm_chip->sys_clk);
+       if (ret < 0) {
+               dev_err(dev, "could not prepare or enable sys clock\n");
+               return ret;
+       }
+
+       ret = clk_prepare_enable(pwm_chip->pwm_clk);
+       if (ret < 0) {
+               dev_err(dev, "could not prepare or enable pwm clock\n");
+               clk_disable_unprepare(pwm_chip->sys_clk);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int img_pwm_probe(struct platform_device *pdev)
 {
        int ret;
@@ -224,23 +278,20 @@ static int img_pwm_probe(struct platform_device *pdev)
                return PTR_ERR(pwm->pwm_clk);
        }
 
-       ret = clk_prepare_enable(pwm->sys_clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
-               return ret;
-       }
-
-       ret = clk_prepare_enable(pwm->pwm_clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
-               goto disable_sysclk;
+       pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+       if (!pm_runtime_enabled(&pdev->dev)) {
+               ret = img_pwm_runtime_resume(&pdev->dev);
+               if (ret)
+                       goto err_pm_disable;
        }
 
        clk_rate = clk_get_rate(pwm->pwm_clk);
        if (!clk_rate) {
                dev_err(&pdev->dev, "pwm clock has no frequency\n");
                ret = -EINVAL;
-               goto disable_pwmclk;
+               goto err_suspend;
        }
 
        /* The maximum input clock divider is 512 */
@@ -255,21 +306,23 @@ static int img_pwm_probe(struct platform_device *pdev)
        pwm->chip.dev = &pdev->dev;
        pwm->chip.ops = &img_pwm_ops;
        pwm->chip.base = -1;
-       pwm->chip.npwm = 4;
+       pwm->chip.npwm = IMG_PWM_NPWM;
 
        ret = pwmchip_add(&pwm->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
-               goto disable_pwmclk;
+               goto err_suspend;
        }
 
        platform_set_drvdata(pdev, pwm);
        return 0;
 
-disable_pwmclk:
-       clk_disable_unprepare(pwm->pwm_clk);
-disable_sysclk:
-       clk_disable_unprepare(pwm->sys_clk);
+err_suspend:
+       if (!pm_runtime_enabled(&pdev->dev))
+               img_pwm_runtime_suspend(&pdev->dev);
+err_pm_disable:
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
        return ret;
 }
 
@@ -278,6 +331,11 @@ static int img_pwm_remove(struct platform_device *pdev)
        struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
        u32 val;
        unsigned int i;
+       int ret;
+
+       ret = pm_runtime_get_sync(&pdev->dev);
+       if (ret < 0)
+               return ret;
 
        for (i = 0; i < pwm_chip->chip.npwm; i++) {
                val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
@@ -285,15 +343,79 @@ static int img_pwm_remove(struct platform_device *pdev)
                img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
        }
 
-       clk_disable_unprepare(pwm_chip->pwm_clk);
-       clk_disable_unprepare(pwm_chip->sys_clk);
+       pm_runtime_put(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+       if (!pm_runtime_status_suspended(&pdev->dev))
+               img_pwm_runtime_suspend(&pdev->dev);
 
        return pwmchip_remove(&pwm_chip->chip);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int img_pwm_suspend(struct device *dev)
+{
+       struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+       int i, ret;
+
+       if (pm_runtime_status_suspended(dev)) {
+               ret = img_pwm_runtime_resume(dev);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < pwm_chip->chip.npwm; i++)
+               pwm_chip->suspend_ch_cfg[i] = img_pwm_readl(pwm_chip,
+                                                           PWM_CH_CFG(i));
+
+       pwm_chip->suspend_ctrl_cfg = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+
+       img_pwm_runtime_suspend(dev);
+
+       return 0;
+}
+
+static int img_pwm_resume(struct device *dev)
+{
+       struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+       int ret;
+       int i;
+
+       ret = img_pwm_runtime_resume(dev);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < pwm_chip->chip.npwm; i++)
+               img_pwm_writel(pwm_chip, PWM_CH_CFG(i),
+                              pwm_chip->suspend_ch_cfg[i]);
+
+       img_pwm_writel(pwm_chip, PWM_CTRL_CFG, pwm_chip->suspend_ctrl_cfg);
+
+       for (i = 0; i < pwm_chip->chip.npwm; i++)
+               if (pwm_chip->suspend_ctrl_cfg & BIT(i))
+                       regmap_update_bits(pwm_chip->periph_regs,
+                                          PERIP_PWM_PDM_CONTROL,
+                                          PERIP_PWM_PDM_CONTROL_CH_MASK <<
+                                          PERIP_PWM_PDM_CONTROL_CH_SHIFT(i),
+                                          0);
+
+       if (pm_runtime_status_suspended(dev))
+               img_pwm_runtime_suspend(dev);
+
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops img_pwm_pm_ops = {
+       SET_RUNTIME_PM_OPS(img_pwm_runtime_suspend,
+                          img_pwm_runtime_resume,
+                          NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(img_pwm_suspend, img_pwm_resume)
+};
+
 static struct platform_driver img_pwm_driver = {
        .driver = {
                .name = "img-pwm",
+               .pm = &img_pwm_pm_ops,
                .of_match_table = img_pwm_of_match,
        },
        .probe = img_pwm_probe,
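
The probe, remove and config/enable/disable changes above move clock handling into runtime-PM callbacks and let autosuspend (IMG_PWM_PM_TIMEOUT) gate the clocks. The resulting register-access pattern, condensed into a sketch with hypothetical names:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_hw_op(struct device *dev)
{
	int ret;

	/* Resume the device; clocks come back via the runtime-PM callbacks. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the reference on failure */
		return ret;
	}

	/* ... access registers here ... */

	/* Drop the reference; autosuspend runs after the configured delay. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}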
index b52f3afb2ba1f0f19f1241697ee7dcab92cc0057..f5d97e0ad52b7a71f04689e61dd7755fdf2d2460 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/slab.h>
@@ -40,11 +41,19 @@ enum {
        MTK_CLK_PWM3,
        MTK_CLK_PWM4,
        MTK_CLK_PWM5,
+       MTK_CLK_PWM6,
+       MTK_CLK_PWM7,
+       MTK_CLK_PWM8,
        MTK_CLK_MAX,
 };
 
-static const char * const mtk_pwm_clk_name[] = {
-       "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5"
+static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
+       "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5", "pwm6", "pwm7",
+       "pwm8"
+};
+
+struct mtk_pwm_platform_data {
+       unsigned int num_pwms;
 };
 
 /**
@@ -59,6 +68,10 @@ struct mtk_pwm_chip {
        struct clk *clks[MTK_CLK_MAX];
 };
 
+static const unsigned int mtk_pwm_reg_offset[] = {
+       0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
+};
+
 static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip)
 {
        return container_of(chip, struct mtk_pwm_chip, chip);
@@ -103,14 +116,14 @@ static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num,
                                unsigned int offset)
 {
-       return readl(chip->regs + 0x10 + (num * 0x40) + offset);
+       return readl(chip->regs + mtk_pwm_reg_offset[num] + offset);
 }
 
 static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip,
                                  unsigned int num, unsigned int offset,
                                  u32 value)
 {
-       writel(value, chip->regs + 0x10 + (num * 0x40) + offset);
+       writel(value, chip->regs + mtk_pwm_reg_offset[num] + offset);
 }
 
 static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -185,6 +198,7 @@ static const struct pwm_ops mtk_pwm_ops = {
 
 static int mtk_pwm_probe(struct platform_device *pdev)
 {
+       const struct mtk_pwm_platform_data *data;
        struct mtk_pwm_chip *pc;
        struct resource *res;
        unsigned int i;
@@ -194,15 +208,22 @@ static int mtk_pwm_probe(struct platform_device *pdev)
        if (!pc)
                return -ENOMEM;
 
+       data = of_device_get_match_data(&pdev->dev);
+       if (data == NULL)
+               return -EINVAL;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pc->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pc->regs))
                return PTR_ERR(pc->regs);
 
-       for (i = 0; i < MTK_CLK_MAX; i++) {
+       for (i = 0; i < data->num_pwms + 2; i++) {
                pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]);
-               if (IS_ERR(pc->clks[i]))
+               if (IS_ERR(pc->clks[i])) {
+                       dev_err(&pdev->dev, "clock: %s fail: %ld\n",
+                               mtk_pwm_clk_name[i], PTR_ERR(pc->clks[i]));
                        return PTR_ERR(pc->clks[i]);
+               }
        }
 
        platform_set_drvdata(pdev, pc);
@@ -210,7 +231,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
        pc->chip.dev = &pdev->dev;
        pc->chip.ops = &mtk_pwm_ops;
        pc->chip.base = -1;
-       pc->chip.npwm = 5;
+       pc->chip.npwm = data->num_pwms;
 
        ret = pwmchip_add(&pc->chip);
        if (ret < 0) {
@@ -228,9 +249,23 @@ static int mtk_pwm_remove(struct platform_device *pdev)
        return pwmchip_remove(&pc->chip);
 }
 
+static const struct mtk_pwm_platform_data mt2712_pwm_data = {
+       .num_pwms = 8,
+};
+
+static const struct mtk_pwm_platform_data mt7622_pwm_data = {
+       .num_pwms = 6,
+};
+
+static const struct mtk_pwm_platform_data mt7623_pwm_data = {
+       .num_pwms = 5,
+};
+
 static const struct of_device_id mtk_pwm_of_match[] = {
-       { .compatible = "mediatek,mt7623-pwm" },
-       { }
+       { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
+       { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
+       { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
+       { },
 };
 MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
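
Per-SoC parameters (here the number of PWM channels, and therefore which clocks exist) now come from the OF match data instead of hard-coded constants. A minimal sketch of that lookup, with hypothetical compatible strings and fields:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_pdata {
	unsigned int num_channels;
};

static const struct example_pdata example_soc_a_pdata = {
	.num_channels = 8,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-soc-a", .data = &example_soc_a_pdata },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_pdata *data;

	/* Returns the .data pointer of the matched of_device_id, or NULL. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	/* ... size per-channel resources from data->num_channels ... */
	return 0;
}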
 
index 9793b296108ff1cca722d1bb78f6896b28b777e8..1ac9e438414291a3330edfd5ad5e05370703d4b5 100644 (file)
@@ -219,8 +219,7 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev)
        unsigned int i;
 
        for (i = 0; i < priv->chip.npwm; i++)
-               if (pwm_is_enabled(&priv->chip.pwms[i]))
-                       pwm_disable(&priv->chip.pwms[i]);
+               pwm_disable(&priv->chip.pwms[i]);
 
        return pwmchip_remove(&priv->chip);
 }
index 6d23f1d1c9b73373e84ea4a954da634cbd7911dc..334199c58f1de78791cc748da3ce9f12f61ba5ba 100644 (file)
@@ -368,14 +368,15 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
        struct sun4i_pwm_chip *pwm;
        struct resource *res;
        int ret;
-       const struct of_device_id *match;
-
-       match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);
 
        pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
        if (!pwm)
                return -ENOMEM;
 
+       pwm->data = of_device_get_match_data(&pdev->dev);
+       if (!pwm->data)
+               return -ENODEV;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pwm->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pwm->base))
@@ -385,7 +386,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
        if (IS_ERR(pwm->clk))
                return PTR_ERR(pwm->clk);
 
-       pwm->data = match->data;
        pwm->chip.dev = &pdev->dev;
        pwm->chip.ops = &sun4i_pwm_ops;
        pwm->chip.base = -1;
index 665d9e94a7e1bee135b6e046d8e27a15579fee5d..ec4bc1515f0d0008cb0d4137ca0833e9441142af 100644 (file)
@@ -959,9 +959,10 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 
        nents = dma_map_sg(chan->device->dev,
                           req->sgt.sgl, req->sgt.nents, dir);
-       if (nents == -EFAULT) {
+       if (nents == 0) {
                rmcd_error("Failed to map SG list");
-               return -EFAULT;
+               ret = -EFAULT;
+               goto err_pg;
        }
 
        ret = do_dma_request(req, xfer, sync, nents);
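
dma_map_sg() returns the number of mapped entries and 0 on failure; it never returns a negative errno, which is what the hunk above corrects (along with unwinding through the existing err_pg label). A small sketch of the corrected check, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_and_transfer(struct device *dev, struct scatterlist *sgl,
				    unsigned int nents,
				    enum dma_data_direction dir)
{
	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, dir);
	if (mapped == 0)	/* 0 means the mapping failed; no negative codes */
		return -EFAULT;

	/* ... issue the transfer using the first 'mapped' entries ... */

	dma_unmap_sg(dev, sgl, nents, dir);
	return 0;
}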
index e67b923b1ca6a74d880f789c6ade51acee88a50a..4931ed79042847399d5ea9b6a2c1688dda9090d7 100644 (file)
@@ -458,7 +458,7 @@ static void idtg2_remove(struct rio_dev *rdev)
        idtg2_sysfs(rdev, false);
 }
 
-static struct rio_device_id idtg2_id_table[] = {
+static const struct rio_device_id idtg2_id_table[] = {
        {RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)},
        {RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)},
        {RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)},
index c5923a547bedb6d674c5aba84673086020f97a5c..85a3908294d94f6f2fb6bf1b59c590211467c74f 100644 (file)
@@ -348,7 +348,7 @@ static void idtg3_shutdown(struct rio_dev *rdev)
        }
 }
 
-static struct rio_device_id idtg3_id_table[] = {
+static const struct rio_device_id idtg3_id_table[] = {
        {RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)},
        {RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)},
        { 0, }  /* terminate list */
index 7fbb60d3179602240766871fde2ace6d3b303283..4058ce2c76faf210234c877dfa7d06018e3f790c 100644 (file)
@@ -168,7 +168,7 @@ static void idtcps_remove(struct rio_dev *rdev)
        spin_unlock(&rdev->rswitch->lock);
 }
 
-static struct rio_device_id idtcps_id_table[] = {
+static const struct rio_device_id idtcps_id_table[] = {
        {RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)},
        {RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)},
        {RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)},
index 8a43561b9d17f7d4b9dc3f67f4c2e3e521b84e14..1214628b7ded7d1b961ebd3d81d3645f21ce985f 100644 (file)
@@ -169,7 +169,7 @@ static void tsi568_remove(struct rio_dev *rdev)
        spin_unlock(&rdev->rswitch->lock);
 }
 
-static struct rio_device_id tsi568_id_table[] = {
+static const struct rio_device_id tsi568_id_table[] = {
        {RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)},
        { 0, }  /* terminate list */
 };
index 2700d15f758423b5398e3d0047e94c7477be6274..9f063e214836a007fc9f6a425f8cbfed5bd7af9b 100644 (file)
@@ -336,7 +336,7 @@ static void tsi57x_remove(struct rio_dev *rdev)
        spin_unlock(&rdev->rswitch->lock);
 }
 
-static struct rio_device_id tsi57x_id_table[] = {
+static const struct rio_device_id tsi57x_id_table[] = {
        {RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)},
        {RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)},
        {RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)},
index bf04479456a050abb56290a71729a76f49a638b6..b609e1d3654ba65f13d480ce71834fb5b507773b 100644 (file)
@@ -28,7 +28,6 @@ config OMAP_REMOTEPROC
        depends on OMAP_IOMMU
        select MAILBOX
        select OMAP2PLUS_MBOX
-       select RPMSG_VIRTIO
        help
          Say y here to support OMAP's remote processors (dual M3
          and DSP on OMAP4) via the remote processor framework.
@@ -58,7 +57,6 @@ config DA8XX_REMOTEPROC
        tristate "DA8xx/OMAP-L13x remoteproc support"
        depends on ARCH_DAVINCI_DA8XX
        depends on DMA_CMA
-       select RPMSG_VIRTIO
        help
          Say y here to support DA8xx/OMAP-L13x remote processors via the
          remote processor framework.
@@ -79,7 +77,6 @@ config DA8XX_REMOTEPROC
 config KEYSTONE_REMOTEPROC
        tristate "Keystone Remoteproc support"
        depends on ARCH_KEYSTONE
-       select RPMSG_VIRTIO
        help
           Say Y here to support Keystone remote processors (DSP)
          via the remote processor framework.
@@ -135,7 +132,6 @@ config ST_REMOTEPROC
        depends on ARCH_STI
        select MAILBOX
        select STI_MBOX
-       select RPMSG_VIRTIO
        help
          Say y here to support ST's adjunct processors via the remote
          processor framework.
index 2d3d5ac92c060260a35bfe8dd45c4809289ce77f..8a3fa2bcc9f699da150222335c37e3e96eb31f26 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/soc/qcom/mdt_loader.h>
 #include <linux/soc/qcom/smem.h>
 #include <linux/soc/qcom/smem_state.h>
+#include <linux/iopoll.h>
 
 #include "remoteproc_internal.h"
 #include "qcom_common.h"
@@ -64,6 +65,8 @@
 #define QDSP6SS_RESET_REG              0x014
 #define QDSP6SS_GFMUX_CTL_REG          0x020
 #define QDSP6SS_PWR_CTL_REG            0x030
+#define QDSP6SS_MEM_PWR_CTL            0x0B0
+#define QDSP6SS_STRAP_ACC              0x110
 
 /* AXI Halt Register Offsets */
 #define AXI_HALTREQ_REG                        0x0
 #define QDSS_BHS_ON                    BIT(21)
 #define QDSS_LDO_BYP                   BIT(22)
 
+/* QDSP6v56 parameters */
+#define QDSP6v56_LDO_BYP               BIT(25)
+#define QDSP6v56_BHS_ON                BIT(24)
+#define QDSP6v56_CLAMP_WL              BIT(21)
+#define QDSP6v56_CLAMP_QMC_MEM         BIT(22)
+#define HALT_CHECK_MAX_LOOPS           200
+#define QDSP6SS_XO_CBCR                0x0038
+#define QDSP6SS_ACC_OVERRIDE_VAL               0x20
+
 struct reg_info {
        struct regulator *reg;
        int uV;
@@ -110,6 +122,8 @@ struct rproc_hexagon_res {
        struct qcom_mss_reg_res *active_supply;
        char **proxy_clk_names;
        char **active_clk_names;
+       int version;
+       bool need_mem_protection;
 };
 
 struct q6v5 {
@@ -154,6 +168,16 @@ struct q6v5 {
 
        struct qcom_rproc_subdev smd_subdev;
        struct qcom_rproc_ssr ssr_subdev;
+       bool need_mem_protection;
+       int mpss_perm;
+       int mba_perm;
+       int version;
+};
+
+enum {
+       MSS_MSM8916,
+       MSS_MSM8974,
+       MSS_MSM8996,
 };
 
 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
@@ -289,6 +313,26 @@ static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc,
        return &table;
 }
 
+static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
+                                  bool remote_owner, phys_addr_t addr,
+                                  size_t size)
+{
+       struct qcom_scm_vmperm next;
+
+       if (!qproc->need_mem_protection)
+               return 0;
+       if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
+               return 0;
+       if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
+               return 0;
+
+       next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
+       next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
+
+       return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
+                                  current_perm, &next, 1);
+}
+
 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
 {
        struct q6v5 *qproc = rproc->priv;
@@ -353,33 +397,98 @@ static int q6v5proc_reset(struct q6v5 *qproc)
 {
        u32 val;
        int ret;
+       int i;
 
-       /* Assert resets, stop core */
-       val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
-       val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
-       writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
 
-       /* Enable power block headswitch, and wait for it to stabilize */
-       val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
-       val |= QDSS_BHS_ON | QDSS_LDO_BYP;
-       writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
-       udelay(1);
-
-       /*
-        * Turn on memories. L2 banks should be done individually
-        * to minimize inrush current.
-        */
-       val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
-       val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
-               Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
-       writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
-       val |= Q6SS_L2DATA_SLP_NRET_N_2;
-       writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
-       val |= Q6SS_L2DATA_SLP_NRET_N_1;
-       writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
-       val |= Q6SS_L2DATA_SLP_NRET_N_0;
-       writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+       if (qproc->version == MSS_MSM8996) {
+               /* Override the ACC value if required */
+               writel(QDSP6SS_ACC_OVERRIDE_VAL,
+                      qproc->reg_base + QDSP6SS_STRAP_ACC);
 
+               /* Assert resets, stop core */
+               val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+               val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+               writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+               /* BHS require xo cbcr to be enabled */
+               val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+               val |= 0x1;
+               writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+               /* Read CLKOFF bit to go low indicating CLK is enabled */
+               ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+                                        val, !(val & BIT(31)), 1,
+                                        HALT_CHECK_MAX_LOOPS);
+               if (ret) {
+                       dev_err(qproc->dev,
+                               "xo cbcr enabling timed out (rc:%d)\n", ret);
+                       return ret;
+               }
+               /* Enable power block headswitch and wait for it to stabilize */
+               val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= QDSP6v56_BHS_ON;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               udelay(1);
+
+               /* Put LDO in bypass mode */
+               val |= QDSP6v56_LDO_BYP;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+               /* Deassert QDSP6 compiler memory clamp */
+               val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val &= ~QDSP6v56_CLAMP_QMC_MEM;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+               /* Deassert memory peripheral sleep and L2 memory standby */
+               val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+               /* Turn on L1, L2, ETB and JU memories 1 at a time */
+               val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
+               for (i = 19; i >= 0; i--) {
+                       val |= BIT(i);
+                       writel(val, qproc->reg_base +
+                                               QDSP6SS_MEM_PWR_CTL);
+                       /*
+                        * Read back value to ensure the write is done then
+                        * wait for 1us for both memory peripheral and data
+                        * array to turn on.
+                        */
+                       val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
+                       udelay(1);
+               }
+               /* Remove word line clamp */
+               val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val &= ~QDSP6v56_CLAMP_WL;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+       } else {
+               /* Assert resets, stop core */
+               val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+               val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+               writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+               /* Enable power block headswitch and wait for it to stabilize */
+               val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               udelay(1);
+               /*
+                * Turn on memories. L2 banks should be done individually
+                * to minimize inrush current.
+                */
+               val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+                       Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= Q6SS_L2DATA_SLP_NRET_N_2;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= Q6SS_L2DATA_SLP_NRET_N_1;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= Q6SS_L2DATA_SLP_NRET_N_0;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+       }
        /* Remove IO clamp */
        val &= ~Q6SS_CLAMP_IO;
        writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
@@ -451,6 +560,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
 {
        unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
        dma_addr_t phys;
+       int mdata_perm;
+       int xferop_ret;
        void *ptr;
        int ret;
 
@@ -462,6 +573,17 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
 
        memcpy(ptr, fw->data, fw->size);
 
+       /* Hypervisor mapping so the modem can access the firmware metadata */
+       mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
+       ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
+                                     true, phys, fw->size);
+       if (ret) {
+               dev_err(qproc->dev,
+                       "assigning Q6 access to metadata failed: %d\n", ret);
+               ret = -EAGAIN;
+               goto free_dma_attrs;
+       }
+
        writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
        writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
 
@@ -471,6 +593,14 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
        else if (ret < 0)
                dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
 
+       /* Metadata authentication done, remove modem access */
+       xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
+                                            false, phys, fw->size);
+       if (xferop_ret)
+               dev_warn(qproc->dev,
+                        "mdt buffer not reclaimed, system may become unstable\n");
+
+free_dma_attrs:
        dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);
 
        return ret < 0 ? ret : 0;
@@ -504,7 +634,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
        bool relocate = false;
        char seg_name[10];
        ssize_t offset;
-       size_t size;
+       size_t size = 0;
        void *ptr;
        int ret;
        int i;
@@ -542,7 +672,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
        }
 
        mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
-
+       /* Load firmware segments */
        for (i = 0; i < ehdr->e_phnum; i++) {
                phdr = &phdrs[i];
 
@@ -575,18 +705,24 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
                        memset(ptr + phdr->p_filesz, 0,
                               phdr->p_memsz - phdr->p_filesz);
                }
-
-               size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
-               if (!size) {
-                       boot_addr = relocate ? qproc->mpss_phys : min_addr;
-                       writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
-                       writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
-               }
-
                size += phdr->p_memsz;
-               writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
        }
 
+       /* Transfer ownership of modem ddr region to q6 */
+       ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
+                                     qproc->mpss_phys, qproc->mpss_size);
+       if (ret) {
+               dev_err(qproc->dev,
+                       "assigning Q6 access to mpss memory failed: %d\n", ret);
+               ret = -EAGAIN;
+               goto release_firmware;
+       }
+
+       boot_addr = relocate ? qproc->mpss_phys : min_addr;
+       writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+       writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+       writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
        ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
        if (ret == -ETIMEDOUT)
                dev_err(qproc->dev, "MPSS authentication timed out\n");
@@ -602,6 +738,7 @@ release_firmware:
 static int q6v5_start(struct rproc *rproc)
 {
        struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+       int xfermemop_ret;
        int ret;
 
        ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
@@ -637,11 +774,22 @@ static int q6v5_start(struct rproc *rproc)
                goto assert_reset;
        }
 
+       /* Assign MBA image access in DDR to q6 */
+       xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+                                               qproc->mba_phys,
+                                               qproc->mba_size);
+       if (xfermemop_ret) {
+               dev_err(qproc->dev,
+                       "assigning Q6 access to mba memory failed: %d\n",
+                       xfermemop_ret);
+               goto disable_active_clks;
+       }
+
        writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
 
        ret = q6v5proc_reset(qproc);
        if (ret)
-               goto halt_axi_ports;
+               goto reclaim_mba;
 
        ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
        if (ret == -ETIMEDOUT) {
@@ -658,16 +806,22 @@ static int q6v5_start(struct rproc *rproc)
 
        ret = q6v5_mpss_load(qproc);
        if (ret)
-               goto halt_axi_ports;
+               goto reclaim_mpss;
 
        ret = wait_for_completion_timeout(&qproc->start_done,
                                          msecs_to_jiffies(5000));
        if (ret == 0) {
                dev_err(qproc->dev, "start timed out\n");
                ret = -ETIMEDOUT;
-               goto halt_axi_ports;
+               goto reclaim_mpss;
        }
 
+       xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+                                               qproc->mba_phys,
+                                               qproc->mba_size);
+       if (xfermemop_ret)
+               dev_err(qproc->dev,
+                       "Failed to reclaim mba buffer, system may become unstable\n");
        qproc->running = true;
 
        q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
@@ -677,12 +831,30 @@ static int q6v5_start(struct rproc *rproc)
 
        return 0;
 
+reclaim_mpss:
+       xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+                                               false, qproc->mpss_phys,
+                                               qproc->mpss_size);
+       WARN_ON(xfermemop_ret);
+
 halt_axi_ports:
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+
+reclaim_mba:
+       xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+                                               qproc->mba_phys,
+                                               qproc->mba_size);
+       if (xfermemop_ret) {
+               dev_err(qproc->dev,
+                       "Failed to reclaim mba buffer, system may become unstable\n");
+       }
+
+disable_active_clks:
        q6v5_clk_disable(qproc->dev, qproc->active_clks,
                         qproc->active_clk_count);
+
 assert_reset:
        reset_control_assert(qproc->mss_restart);
 disable_vdd:
@@ -702,6 +874,7 @@ static int q6v5_stop(struct rproc *rproc)
 {
        struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
        int ret;
+       u32 val;
 
        qproc->running = false;
 
@@ -718,6 +891,20 @@ static int q6v5_stop(struct rproc *rproc)
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+       if (qproc->version == MSS_MSM8996) {
+               /*
+                * Assert the I/O, word line and QMC memory clamps to avoid
+                * high MX current during LPASS/MSS restart.
+                */
+               val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+               val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
+                       QDSP6v56_CLAMP_QMC_MEM;
+               writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+       }
+
+
+       ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
+                                     qproc->mpss_phys, qproc->mpss_size);
+       WARN_ON(ret);
 
        reset_control_assert(qproc->mss_restart);
        q6v5_clk_disable(qproc->dev, qproc->active_clks,
@@ -1017,6 +1204,8 @@ static int q6v5_probe(struct platform_device *pdev)
        if (ret)
                goto free_rproc;
 
+       qproc->version = desc->version;
+       qproc->need_mem_protection = desc->need_mem_protection;
        ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
        if (ret < 0)
                goto free_rproc;
@@ -1038,7 +1227,8 @@ static int q6v5_probe(struct platform_device *pdev)
                ret = PTR_ERR(qproc->state);
                goto free_rproc;
        }
-
+       qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
+       qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
        qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
        qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
 
@@ -1067,6 +1257,24 @@ static int q6v5_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct rproc_hexagon_res msm8996_mss = {
+       .hexagon_mba_image = "mba.mbn",
+       .proxy_clk_names = (char*[]){
+                       "xo",
+                       "pnoc",
+                       NULL
+       },
+       .active_clk_names = (char*[]){
+                       "iface",
+                       "bus",
+                       "mem",
+                       "gpll0_mss_clk",
+                       NULL
+       },
+       .need_mem_protection = true,
+       .version = MSS_MSM8996,
+};
+
 static const struct rproc_hexagon_res msm8916_mss = {
        .hexagon_mba_image = "mba.mbn",
        .proxy_supply = (struct qcom_mss_reg_res[]) {
@@ -1094,6 +1302,8 @@ static const struct rproc_hexagon_res msm8916_mss = {
                "mem",
                NULL
        },
+       .need_mem_protection = false,
+       .version = MSS_MSM8916,
 };
 
 static const struct rproc_hexagon_res msm8974_mss = {
@@ -1131,12 +1341,15 @@ static const struct rproc_hexagon_res msm8974_mss = {
                "mem",
                NULL
        },
+       .need_mem_protection = false,
+       .version = MSS_MSM8974,
 };
 
 static const struct of_device_id q6v5_of_match[] = {
        { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
        { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
        { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
+       { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
        { },
 };
 MODULE_DEVICE_TABLE(of, q6v5_of_match);
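
The memory-protection path added above repeatedly hands buffers (the MDT metadata, the MBA image and the MPSS DDR region) over to the modem and reclaims them on shutdown or in the error paths. The q6v5_xfer_mem_ownership() helper it relies on is not part of the context shown here; the fragment below is only a rough sketch of what such a handover can look like, assuming the qcom_scm_assign_mem() call and the QCOM_SCM_VMID_*/QCOM_SCM_PERM_* constants exported by the Qualcomm SCM firmware driver (the helper name, the constants used and the exact SCM signature are assumptions, not taken from this patch).

#include <linux/kernel.h>
#include <linux/qcom_scm.h>
#include <linux/sizes.h>

/*
 * Sketch only: move a physically contiguous buffer between the local VM
 * (HLOS) and the modem VM.  "current_perm" is a VMID bitmask tracking the
 * current owner (e.g. BIT(QCOM_SCM_VMID_HLOS)) and is updated by the SCM
 * call on success.
 */
static int xfer_mem_ownership_sketch(unsigned int *current_perm, bool to_modem,
				     phys_addr_t addr, size_t size)
{
	struct qcom_scm_vmperm next = {
		.vmid = to_modem ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS,
		.perm = to_modem ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX,
	};

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, &next, 1);
}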
index 1c122e230cec5189fdec1a72897b7a74f0309b50..a20488336aa091b6a0ec69941b0e4bbeb1b84184 100644 (file)
@@ -155,6 +155,132 @@ static const struct file_operations rproc_recovery_ops = {
        .llseek = generic_file_llseek,
 };
 
+/* Expose resource table content via debugfs */
+static int rproc_rsc_table_show(struct seq_file *seq, void *p)
+{
+       static const char * const types[] = {"carveout", "devmem", "trace", "vdev"};
+       struct rproc *rproc = seq->private;
+       struct resource_table *table = rproc->table_ptr;
+       struct fw_rsc_carveout *c;
+       struct fw_rsc_devmem *d;
+       struct fw_rsc_trace *t;
+       struct fw_rsc_vdev *v;
+       int i, j;
+
+       if (!table) {
+               seq_puts(seq, "No resource table found\n");
+               return 0;
+       }
+
+       for (i = 0; i < table->num; i++) {
+               int offset = table->offset[i];
+               struct fw_rsc_hdr *hdr = (void *)table + offset;
+               void *rsc = (void *)hdr + sizeof(*hdr);
+
+               switch (hdr->type) {
+               case RSC_CARVEOUT:
+                       c = rsc;
+                       seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+                       seq_printf(seq, "  Device Address 0x%x\n", c->da);
+                       seq_printf(seq, "  Physical Address 0x%x\n", c->pa);
+                       seq_printf(seq, "  Length 0x%x Bytes\n", c->len);
+                       seq_printf(seq, "  Flags 0x%x\n", c->flags);
+                       seq_printf(seq, "  Reserved (should be zero) [%d]\n", c->reserved);
+                       seq_printf(seq, "  Name %s\n\n", c->name);
+                       break;
+               case RSC_DEVMEM:
+                       d = rsc;
+                       seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+                       seq_printf(seq, "  Device Address 0x%x\n", d->da);
+                       seq_printf(seq, "  Physical Address 0x%x\n", d->pa);
+                       seq_printf(seq, "  Length 0x%x Bytes\n", d->len);
+                       seq_printf(seq, "  Flags 0x%x\n", d->flags);
+                       seq_printf(seq, "  Reserved (should be zero) [%d]\n", d->reserved);
+                       seq_printf(seq, "  Name %s\n\n", d->name);
+                       break;
+               case RSC_TRACE:
+                       t = rsc;
+                       seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+                       seq_printf(seq, "  Device Address 0x%x\n", t->da);
+                       seq_printf(seq, "  Length 0x%x Bytes\n", t->len);
+                       seq_printf(seq, "  Reserved (should be zero) [%d]\n", t->reserved);
+                       seq_printf(seq, "  Name %s\n\n", t->name);
+                       break;
+               case RSC_VDEV:
+                       v = rsc;
+                       seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+
+                       seq_printf(seq, "  ID %d\n", v->id);
+                       seq_printf(seq, "  Notify ID %d\n", v->notifyid);
+                       seq_printf(seq, "  Device features 0x%x\n", v->dfeatures);
+                       seq_printf(seq, "  Guest features 0x%x\n", v->gfeatures);
+                       seq_printf(seq, "  Config length 0x%x\n", v->config_len);
+                       seq_printf(seq, "  Status 0x%x\n", v->status);
+                       seq_printf(seq, "  Number of vrings %d\n", v->num_of_vrings);
+                       seq_printf(seq, "  Reserved (should be zero) [%d][%d]\n\n",
+                                  v->reserved[0], v->reserved[1]);
+
+                       for (j = 0; j < v->num_of_vrings; j++) {
+                               seq_printf(seq, "  Vring %d\n", j);
+                               seq_printf(seq, "    Device Address 0x%x\n", v->vring[j].da);
+                               seq_printf(seq, "    Alignment %d\n", v->vring[j].align);
+                               seq_printf(seq, "    Number of buffers %d\n", v->vring[j].num);
+                               seq_printf(seq, "    Notify ID %d\n", v->vring[j].notifyid);
+                               seq_printf(seq, "    Physical Address 0x%x\n\n",
+                                          v->vring[j].pa);
+                       }
+                       break;
+               default:
+                       seq_printf(seq, "Unknown resource type found: %d [hdr: %p]\n",
+                                  hdr->type, hdr);
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static int rproc_rsc_table_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, rproc_rsc_table_show, inode->i_private);
+}
+
+static const struct file_operations rproc_rsc_table_ops = {
+       .open           = rproc_rsc_table_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+/* Expose carveout content via debugfs */
+static int rproc_carveouts_show(struct seq_file *seq, void *p)
+{
+       struct rproc *rproc = seq->private;
+       struct rproc_mem_entry *carveout;
+
+       list_for_each_entry(carveout, &rproc->carveouts, node) {
+               seq_puts(seq, "Carveout memory entry:\n");
+               seq_printf(seq, "\tVirtual address: %p\n", carveout->va);
+               seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
+               seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
+               seq_printf(seq, "\tLength: 0x%x Bytes\n\n", carveout->len);
+       }
+
+       return 0;
+}
+
+static int rproc_carveouts_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, rproc_carveouts_show, inode->i_private);
+}
+
+static const struct file_operations rproc_carveouts_ops = {
+       .open           = rproc_carveouts_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 void rproc_remove_trace_file(struct dentry *tfile)
 {
        debugfs_remove(tfile);
@@ -198,6 +324,10 @@ void rproc_create_debug_dir(struct rproc *rproc)
                            rproc, &rproc_name_ops);
        debugfs_create_file("recovery", 0400, rproc->dbg_dir,
                            rproc, &rproc_recovery_ops);
+       debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
+                           rproc, &rproc_rsc_table_ops);
+       debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
+                           rproc, &rproc_carveouts_ops);
 }
 
 void __init rproc_init_debugfs(void)
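
Both new entries are read-only seq_file views, so their layout follows the seq_printf() calls above directly. With debugfs mounted, reading resource_table under a remote processor's directory in /sys/kernel/debug/remoteproc/ would yield output along these lines for a table holding a single carveout (the values are purely illustrative, not captured from real hardware):

Entry 0 is of type carveout
  Device Address 0x0
  Physical Address 0x0
  Length 0x100000 Bytes
  Flags 0x0
  Reserved (should be zero) [0]
  Name example_carveout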
index 0fe6eac465121d6bbf7c8c1ce9a8d06d21c0a7a6..65a9f6b892f06a9ffc06e93181a84cadefab09d2 100644 (file)
@@ -47,7 +47,8 @@ config RPMSG_QCOM_SMD
          platforms.
 
 config RPMSG_VIRTIO
-       tristate
+       tristate "Virtio RPMSG bus driver"
+       depends on HAS_DMA
        select RPMSG
        select VIRTIO
 
index 5dcc9bf1c5bc5de65af2bfc5d778c4d494b20cee..40d76d2a5efff58339c0b823be3678591c99482e 100644 (file)
@@ -227,6 +227,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
 
        init_completion(&channel->open_req);
        init_completion(&channel->open_ack);
+       init_completion(&channel->intent_req_comp);
 
        INIT_LIST_HEAD(&channel->done_intents);
        INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);
@@ -1148,19 +1149,38 @@ static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
 static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
 {
        struct glink_channel *channel = to_glink_channel(rpdev->ept);
-       struct glink_core_rx_intent *intent;
+       struct device_node *np = rpdev->dev.of_node;
        struct qcom_glink *glink = channel->glink;
-       int num_intents = glink->intentless ? 0 : 5;
+       struct glink_core_rx_intent *intent;
+       const struct property *prop = NULL;
+       __be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
+       int num_intents;
+       int num_groups = 1;
+       __be32 *val = defaults;
+       int size;
+
+       if (glink->intentless)
+               return 0;
+
+       prop = of_find_property(np, "qcom,intents", NULL);
+       if (prop) {
+               val = prop->value;
+               num_groups = prop->length / sizeof(u32) / 2;
+       }
 
        /* Channel is now open, advertise base set of intents */
-       while (num_intents--) {
-               intent = qcom_glink_alloc_intent(glink, channel, SZ_1K, true);
-               if (!intent)
-                       break;
+       while (num_groups--) {
+               size = be32_to_cpup(val++);
+               num_intents = be32_to_cpup(val++);
+               while (num_intents--) {
+                       intent = qcom_glink_alloc_intent(glink, channel, size,
+                                                        true);
+                       if (!intent)
+                               break;
 
-               qcom_glink_advertise_intent(glink, channel, intent);
+                       qcom_glink_advertise_intent(glink, channel, intent);
+               }
        }
-
        return 0;
 }
 
@@ -1237,11 +1257,16 @@ static int __qcom_glink_send(struct glink_channel *channel,
                        spin_lock_irqsave(&channel->intent_lock, flags);
                        idr_for_each_entry(&channel->riids, tmp, iid) {
                                if (tmp->size >= len && !tmp->in_use) {
-                                       tmp->in_use = true;
-                                       intent = tmp;
-                                       break;
+                                       if (!intent)
+                                               intent = tmp;
+                                       else if (intent->size > tmp->size)
+                                               intent = tmp;
+                                       if (intent->size == len)
+                                               break;
                                }
                        }
+                       if (intent)
+                               intent->in_use = true;
                        spin_unlock_irqrestore(&channel->intent_lock, flags);
 
                        /* We found an available intent */
@@ -1551,6 +1576,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
        idr_init(&glink->rcids);
 
        glink->mbox_client.dev = dev;
+       glink->mbox_client.knows_txdone = true;
        glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
        if (IS_ERR(glink->mbox_chan)) {
                if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
@@ -1616,3 +1642,6 @@ void qcom_glink_native_unregister(struct qcom_glink *glink)
        device_unregister(glink->dev);
 }
 EXPORT_SYMBOL_GPL(qcom_glink_native_unregister);
+
+MODULE_DESCRIPTION("Qualcomm GLINK driver");
+MODULE_LICENSE("GPL v2");
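
With the reworked qcom_glink_announce_create() above, the optional qcom,intents device-tree property is read as a flat list of <size count> cell pairs, each pair pre-queuing "count" receive intents of "size" bytes. For instance, qcom,intents = <0x400 5>, <0x2000 2>; (illustrative values) would advertise five 1 KiB intents and two 8 KiB intents, while channels without the property keep the previous behaviour of five SZ_1K intents.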
index e0e58f3b14209a7fe908adb0cf8ca544a93c65a2..b59a31b079a5347125a7ec55e56f92a75d06220f 100644 (file)
@@ -433,6 +433,19 @@ config RTC_DRV_PCF85063
          This driver can also be built as a module. If so, the module
          will be called rtc-pcf85063.
 
+config RTC_DRV_PCF85363
+       tristate "NXP PCF85363"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         If you say yes here you get support for the PCF85363 RTC chip.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-pcf85363.
+
+         The nvmem interface will be named pcf85363-#, where # is the
+         zero-based instance number.
+
 config RTC_DRV_PCF8563
        tristate "Philips PCF8563/Epson RTC8564"
        help
@@ -1174,6 +1187,17 @@ config RTC_DRV_WM8350
          This driver can also be built as a module. If so, the module
          will be called "rtc-wm8350".
 
+config RTC_DRV_SC27XX
+       tristate "Spreadtrum SC27xx RTC"
+       depends on MFD_SC27XX_PMIC || COMPILE_TEST
+       help
+         If you say Y here you will get support for the RTC subsystem
+         of the Spreadtrum SC27xx series PMICs. The SC27xx series PMICs
+         includes the SC2720, SC2721, SC2723, SC2730 and SC2731 chips.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-sc27xx.
+
 config RTC_DRV_SPEAR
        tristate "SPEAR ST RTC"
        depends on PLAT_SPEAR || COMPILE_TEST
@@ -1706,14 +1730,24 @@ config RTC_DRV_MOXART
           will be called rtc-moxart
 
 config RTC_DRV_MT6397
-       tristate "Mediatek Real Time Clock driver"
+       tristate "MediaTek PMIC based RTC"
        depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
        help
-         This selects the Mediatek(R) RTC driver. RTC is part of Mediatek
+         This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
          MT6397 PMIC. You should enable the MT6397 PMIC MFD before selecting the
-         Mediatek(R) RTC driver.
+         MediaTek(R) RTC driver.
+
+         If you want to use MediaTek(R) RTC interface, select Y or M here.
 
-         If you want to use Mediatek(R) RTC interface, select Y or M here.
+config RTC_DRV_MT7622
+       tristate "MediaTek SoC based RTC"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       help
+         This enables support for the real time clock built into MediaTek
+         SoCs.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-mt7622.
 
 config RTC_DRV_XGENE
        tristate "APM X-Gene RTC"
index 0bf1fc02b82c451348ba6110457bd8189b5e73e5..f2f50c11dc387d93d7cc0e16d1f779ae180b1596 100644 (file)
@@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MPC5121)       += rtc-mpc5121.o
 obj-$(CONFIG_RTC_DRV_VRTC)     += rtc-mrst.o
 obj-$(CONFIG_RTC_DRV_MSM6242)  += rtc-msm6242.o
 obj-$(CONFIG_RTC_DRV_MT6397)   += rtc-mt6397.o
+obj-$(CONFIG_RTC_DRV_MT7622)   += rtc-mt7622.o
 obj-$(CONFIG_RTC_DRV_MV)       += rtc-mv.o
 obj-$(CONFIG_RTC_DRV_MXC)      += rtc-mxc.o
 obj-$(CONFIG_RTC_DRV_NUC900)   += rtc-nuc900.o
@@ -114,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_PCF2123)       += rtc-pcf2123.o
 obj-$(CONFIG_RTC_DRV_PCF2127)  += rtc-pcf2127.o
 obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
 obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
+obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o
 obj-$(CONFIG_RTC_DRV_PCF8523)  += rtc-pcf8523.o
 obj-$(CONFIG_RTC_DRV_PCF8563)  += rtc-pcf8563.o
 obj-$(CONFIG_RTC_DRV_PCF8583)  += rtc-pcf8583.o
@@ -144,6 +146,7 @@ obj-$(CONFIG_RTC_DRV_S35390A)       += rtc-s35390a.o
 obj-$(CONFIG_RTC_DRV_S3C)      += rtc-s3c.o
 obj-$(CONFIG_RTC_DRV_S5M)      += rtc-s5m.o
 obj-$(CONFIG_RTC_DRV_SA1100)   += rtc-sa1100.o
+obj-$(CONFIG_RTC_DRV_SC27XX)   += rtc-sc27xx.o
 obj-$(CONFIG_RTC_DRV_SH)       += rtc-sh.o
 obj-$(CONFIG_RTC_DRV_SIRFSOC)  += rtc-sirfsoc.o
 obj-$(CONFIG_RTC_DRV_SNVS)     += rtc-snvs.o
index 8cec9a02c0b8937fdcdca2fdd80d14dba4e79b7c..672b192f8153aa00665daaefc541ebaf601cfe74 100644 (file)
@@ -779,7 +779,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        }
 
        timerqueue_add(&rtc->timerqueue, &timer->node);
-       if (!next) {
+       if (!next || ktime_before(timer->node.expires, next->expires)) {
                struct rtc_wkalrm alarm;
                int err;
                alarm.time = rtc_ktime_to_tm(timer->node.expires);
@@ -1004,6 +1004,10 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
  * to compensate for differences in the actual clock rate due to temperature,
  * the crystal, capacitor, etc.
  *
+ * The adjustment applied is as follows:
+ *   t = t0 * (1 + offset * 1e-9)
+ * where t0 is the measured length of 1 RTC second with offset = 0
+ *
  * Kernel interface to adjust an rtc clock offset.
  * Return 0 on success, or a negative number on error.
  * If the rtc offset is not settable (or not implemented), return -EINVAL
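
As a quick worked example of the formula above: offset = 1000 scales each RTC second by (1 + 1000 * 1e-9) = 1.000001, which over a 86400-second day amounts to roughly 86400 s * 1e-6 = 86.4 ms of adjustment; a negative offset adjusts by the same amount in the opposite direction.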
index fea9a60b06cf6dd25b1a078bf6e812265aa0acc1..b033bc556f5d29b4d803ff6506685c3dc940aee1 100644 (file)
@@ -614,12 +614,12 @@ static int abx80x_probe(struct i2c_client *client,
        if (err)
                return err;
 
-       rtc = devm_rtc_device_register(&client->dev, "abx8xx",
-                                      &abx80x_rtc_ops, THIS_MODULE);
-
+       rtc = devm_rtc_allocate_device(&client->dev);
        if (IS_ERR(rtc))
                return PTR_ERR(rtc);
 
+       rtc->ops = &abx80x_rtc_ops;
+
        i2c_set_clientdata(client, rtc);
 
        if (client->irq > 0) {
@@ -646,10 +646,14 @@ static int abx80x_probe(struct i2c_client *client,
        err = devm_add_action_or_reset(&client->dev,
                                       rtc_calib_remove_sysfs_group,
                                       &client->dev);
-       if (err)
+       if (err) {
                dev_err(&client->dev,
                        "Failed to add sysfs cleanup action: %d\n",
                        err);
+               return err;
+       }
+
+       err = rtc_register_device(rtc);
 
        return err;
 }
index 21f355c37eab53cd4de0668e7250d1d578344e71..1e4978c96ffd273c617e9c8d89f49a9b2a0838ec 100644 (file)
@@ -28,6 +28,8 @@
 #define RTC_IRQ_AL_EN              BIT(0)
 #define RTC_IRQ_FREQ_EN                    BIT(1)
 #define RTC_IRQ_FREQ_1HZ           BIT(2)
+#define RTC_CCR                    0x18
+#define RTC_CCR_MODE               BIT(15)
 
 #define RTC_TIME           0xC
 #define RTC_ALARM1         0x10
@@ -343,18 +345,117 @@ static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+/*
+ * The information given in the Armada 388 functional spec is complex.
+ * They give two different formulas for calculating the offset value,
+ * but when considering "Offset" as an 8-bit signed integer, they both
+ * reduce down to (we shall rename "Offset" as "val" here):
+ *
+ *   val = (f_ideal / f_measured - 1) / resolution   where f_ideal = 32768
+ *
+ * Converting to time, f = 1/t:
+ *   val = (t_measured / t_ideal - 1) / resolution   where t_ideal = 1/32768
+ *
+ *   =>  t_measured / t_ideal = val * resolution + 1
+ *
+ * "offset" in the RTC interface is defined as:
+ *   t = t0 * (1 + offset * 1e-9)
+ * where t is the desired period, t0 is the measured period with a zero
+ * offset, which is t_measured above. With t0 = t_measured and t = t_ideal,
+ *   offset = (t_ideal / t_measured - 1) / 1e-9
+ *
+ *   => t_ideal / t_measured = offset * 1e-9 + 1
+ *
+ * so:
+ *
+ *   offset * 1e-9 + 1 = 1 / (val * resolution + 1)
+ *
+ * We want "resolution" to be an integer, so resolution = R * 1e-9, giving
+ *   offset = 1e18 / (val * R + 1e9) - 1e9
+ *   val = (1e18 / (offset + 1e9) - 1e9) / R
+ * with a common transformation:
+ *   f(x) = 1e18 / (x + 1e9) - 1e9
+ *   offset = f(val * R)
+ *   val = f(offset) / R
+ *
+ * Armada 38x supports two modes, fine mode (954ppb) and coarse mode (3815ppb).
+ */
+static long armada38x_ppb_convert(long ppb)
+{
+       long div = ppb + 1000000000L;
+
+       return div_s64(1000000000000000000LL + div / 2, div) - 1000000000L;
+}
+
+static int armada38x_rtc_read_offset(struct device *dev, long *offset)
+{
+       struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+       unsigned long ccr, flags;
+       long ppb_cor;
+
+       spin_lock_irqsave(&rtc->lock, flags);
+       ccr = rtc->data->read_rtc_reg(rtc, RTC_CCR);
+       spin_unlock_irqrestore(&rtc->lock, flags);
+
+       ppb_cor = (ccr & RTC_CCR_MODE ? 3815 : 954) * (s8)ccr;
+       /* ppb_cor + 1000000000L can never be zero */
+       *offset = armada38x_ppb_convert(ppb_cor);
+
+       return 0;
+}
+
+static int armada38x_rtc_set_offset(struct device *dev, long offset)
+{
+       struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+       unsigned long ccr = 0;
+       long ppb_cor, off;
+
+       /*
+        * The maximum ppb_cor is -128 * 3815 .. 127 * 3815, but we
+        * need to clamp the input.  This equates to -484270 .. 488558.
+        * Clamping not only keeps "off" within range but also avoids the
+        * division by zero in armada38x_ppb_convert().
+        */
+       offset = clamp(offset, -484270L, 488558L);
+
+       ppb_cor = armada38x_ppb_convert(offset);
+
+       /*
+        * Use low update mode where possible, which gives a better
+        * resolution of correction.
+        */
+       off = DIV_ROUND_CLOSEST(ppb_cor, 954);
+       if (off > 127 || off < -128) {
+               ccr = RTC_CCR_MODE;
+               off = DIV_ROUND_CLOSEST(ppb_cor, 3815);
+       }
+
+       /*
+        * Armada 388 requires a bit pattern in bits 14..8 depending on
+        * the sign bit: { 0, ~S, S, S, S, S, S }
+        */
+       ccr |= (off & 0x3fff) ^ 0x2000;
+       rtc_delayed_write(ccr, rtc, RTC_CCR);
+
+       return 0;
+}
+
 static const struct rtc_class_ops armada38x_rtc_ops = {
        .read_time = armada38x_rtc_read_time,
        .set_time = armada38x_rtc_set_time,
        .read_alarm = armada38x_rtc_read_alarm,
        .set_alarm = armada38x_rtc_set_alarm,
        .alarm_irq_enable = armada38x_rtc_alarm_irq_enable,
+       .read_offset = armada38x_rtc_read_offset,
+       .set_offset = armada38x_rtc_set_offset,
 };
 
 static const struct rtc_class_ops armada38x_rtc_ops_noirq = {
        .read_time = armada38x_rtc_read_time,
        .set_time = armada38x_rtc_set_time,
        .read_alarm = armada38x_rtc_read_alarm,
+       .read_offset = armada38x_rtc_read_offset,
+       .set_offset = armada38x_rtc_set_offset,
 };
 
 static const struct armada38x_rtc_data armada38x_data = {
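
To make the conversion above concrete, here is a worked example using the helper and step sizes introduced in this hunk (the requested offset is an arbitrary illustrative value):

    offset  = 10000 ppb, requested through the RTC offset interface
    ppb_cor = armada38x_ppb_convert(10000)
            = 1e18 / (10000 + 1e9) - 1e9  ~= -10000 ppb
    off     = DIV_ROUND_CLOSEST(-10000, 954) = -10, which fits in -128..127

so fine mode is kept, RTC_CCR_MODE stays clear and -10 (with the sign bit pattern applied) ends up in the low bits of RTC_CCR.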
index e221b78b6f106ec3ccde7c3326b5934c48153f9c..de81ecedd571d8e0a1ff5ba1f5df59673d5efc60 100644 (file)
@@ -42,8 +42,6 @@
 #define at91_rtc_write(field, val) \
        writel_relaxed((val), at91_rtc_regs + field)
 
-#define AT91_RTC_EPOCH         1900UL  /* just like arch/arm/common/rtctime.c */
-
 struct at91_rtc_config {
        bool use_shadow_imr;
 };
@@ -51,7 +49,6 @@ struct at91_rtc_config {
 static const struct at91_rtc_config *at91_rtc_config;
 static DECLARE_COMPLETION(at91_rtc_updated);
 static DECLARE_COMPLETION(at91_rtc_upd_rdy);
-static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
 static void __iomem *at91_rtc_regs;
 static int irq;
 static DEFINE_SPINLOCK(at91_rtc_lock);
@@ -131,8 +128,7 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
 
        /*
         * The Calendar Alarm register does not have a field for
-        * the year - so these will return an invalid value.  When an
-        * alarm is set, at91_alarm_year will store the current year.
+        * the year - so these will return an invalid value.
         */
        tm->tm_year  = bcd2bin(date & AT91_RTC_CENT) * 100;     /* century */
        tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8);    /* year */
@@ -208,15 +204,14 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
        struct rtc_time *tm = &alrm->time;
 
        at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm);
-       tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
-       tm->tm_year = at91_alarm_year - 1900;
+       tm->tm_year = -1;
 
        alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
                        ? 1 : 0;
 
-       dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
-               1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
-               tm->tm_hour, tm->tm_min, tm->tm_sec);
+       dev_dbg(dev, "%s(): %02d-%02d %02d:%02d:%02d %sabled\n", __func__,
+               tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec,
+               alrm->enabled ? "en" : "dis");
 
        return 0;
 }
@@ -230,8 +225,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
 
-       at91_alarm_year = tm.tm_year;
-
        tm.tm_mon = alrm->time.tm_mon;
        tm.tm_mday = alrm->time.tm_mday;
        tm.tm_hour = alrm->time.tm_hour;
@@ -255,7 +248,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        }
 
        dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
-               at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
+               tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
                tm.tm_min, tm.tm_sec);
 
        return 0;
index 00efe24a60633527aa1e9847234612ebbdb3edb4..215eac68ae2d72afda8c0e114755485ce2f10185 100644 (file)
@@ -71,9 +71,9 @@ static void rtc_uie_task(struct work_struct *work)
        if (num)
                rtc_handle_legacy_irq(rtc, num, RTC_UF);
 }
-static void rtc_uie_timer(unsigned long data)
+static void rtc_uie_timer(struct timer_list *t)
 {
-       struct rtc_device *rtc = (struct rtc_device *)data;
+       struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
        unsigned long flags;
 
        spin_lock_irqsave(&rtc->irq_lock, flags);
@@ -460,7 +460,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
 
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        INIT_WORK(&rtc->uie_task, rtc_uie_task);
-       setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+       timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
 #endif
 
        cdev_init(&rtc->char_dev, &rtc_dev_fops);
index 72b22935eb62a0e9d71da4552ab64d0ace6ccd39..d8df2e9e14adb94e45a01655ee773cb76f10e28a 100644 (file)
@@ -514,56 +514,43 @@ static void msg_init(struct spi_message *m, struct spi_transfer *x,
        spi_message_add_tail(x, m);
 }
 
-static ssize_t
-ds1305_nvram_read(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t off, size_t count)
+static int ds1305_nvram_read(void *priv, unsigned int off, void *buf,
+                            size_t count)
 {
-       struct spi_device       *spi;
+       struct ds1305           *ds1305 = priv;
+       struct spi_device       *spi = ds1305->spi;
        u8                      addr;
        struct spi_message      m;
        struct spi_transfer     x[2];
-       int                     status;
-
-       spi = to_spi_device(kobj_to_dev(kobj));
 
        addr = DS1305_NVRAM + off;
        msg_init(&m, x, &addr, count, NULL, buf);
 
-       status = spi_sync(spi, &m);
-       if (status < 0)
-               dev_err(&spi->dev, "nvram %s error %d\n", "read", status);
-       return (status < 0) ? status : count;
+       return spi_sync(spi, &m);
 }
 
-static ssize_t
-ds1305_nvram_write(struct file *filp, struct kobject *kobj,
-               struct bin_attribute *attr,
-               char *buf, loff_t off, size_t count)
+static int ds1305_nvram_write(void *priv, unsigned int off, void *buf,
+                             size_t count)
 {
-       struct spi_device       *spi;
+       struct ds1305           *ds1305 = priv;
+       struct spi_device       *spi = ds1305->spi;
        u8                      addr;
        struct spi_message      m;
        struct spi_transfer     x[2];
-       int                     status;
-
-       spi = to_spi_device(kobj_to_dev(kobj));
 
        addr = (DS1305_WRITE | DS1305_NVRAM) + off;
        msg_init(&m, x, &addr, count, buf, NULL);
 
-       status = spi_sync(spi, &m);
-       if (status < 0)
-               dev_err(&spi->dev, "nvram %s error %d\n", "write", status);
-       return (status < 0) ? status : count;
+       return spi_sync(spi, &m);
 }
 
-static struct bin_attribute nvram = {
-       .attr.name      = "nvram",
-       .attr.mode      = S_IRUGO | S_IWUSR,
-       .read           = ds1305_nvram_read,
-       .write          = ds1305_nvram_write,
-       .size           = DS1305_NVRAM_LEN,
+static struct nvmem_config ds1305_nvmem_cfg = {
+       .name = "ds1305_nvram",
+       .word_size = 1,
+       .stride = 1,
+       .size = DS1305_NVRAM_LEN,
+       .reg_read = ds1305_nvram_read,
+       .reg_write = ds1305_nvram_write,
 };
 
 /*----------------------------------------------------------------------*/
@@ -708,10 +695,19 @@ static int ds1305_probe(struct spi_device *spi)
                dev_dbg(&spi->dev, "AM/PM\n");
 
        /* register RTC ... from here on, ds1305->ctrl needs locking */
-       ds1305->rtc = devm_rtc_device_register(&spi->dev, "ds1305",
-                       &ds1305_ops, THIS_MODULE);
+       ds1305->rtc = devm_rtc_allocate_device(&spi->dev);
        if (IS_ERR(ds1305->rtc)) {
-               status = PTR_ERR(ds1305->rtc);
+               return PTR_ERR(ds1305->rtc);
+       }
+
+       ds1305->rtc->ops = &ds1305_ops;
+
+       ds1305_nvmem_cfg.priv = ds1305;
+       ds1305->rtc->nvmem_config = &ds1305_nvmem_cfg;
+       ds1305->rtc->nvram_old_abi = true;
+
+       status = rtc_register_device(ds1305->rtc);
+       if (status) {
                dev_dbg(&spi->dev, "register rtc --> %d\n", status);
                return status;
        }
@@ -734,12 +730,6 @@ static int ds1305_probe(struct spi_device *spi)
                }
        }
 
-       /* export NVRAM */
-       status = sysfs_create_bin_file(&spi->dev.kobj, &nvram);
-       if (status < 0) {
-               dev_err(&spi->dev, "register nvram --> %d\n", status);
-       }
-
        return 0;
 }
 
@@ -747,8 +737,6 @@ static int ds1305_remove(struct spi_device *spi)
 {
        struct ds1305 *ds1305 = spi_get_drvdata(spi);
 
-       sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
-
        /* carefully shut down irq and workqueue, if present */
        if (spi->irq) {
                set_bit(FLAG_EXITING, &ds1305->flags);
index e7d9215c9201b14fe66aaefb8c7379aeb8ae6e00..923dde912f604094219c5860794080888308df1f 100644 (file)
@@ -325,6 +325,10 @@ static const struct of_device_id ds1307_of_match[] = {
                .compatible = "isil,isl12057",
                .data = (void *)ds_1337
        },
+       {
+               .compatible = "epson,rx8130",
+               .data = (void *)rx_8130
+       },
        { }
 };
 MODULE_DEVICE_TABLE(of, ds1307_of_match);
@@ -348,6 +352,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = {
        { .id = "PT7C4338", .driver_data = ds_1307 },
        { .id = "RX8025", .driver_data = rx_8025 },
        { .id = "ISL12057", .driver_data = ds_1337 },
+       { .id = "RX8130", .driver_data = rx_8130 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
@@ -787,8 +792,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
  * Alarm support for mcp794xx devices.
  */
 
-#define MCP794XX_REG_WEEKDAY           0x3
-#define MCP794XX_REG_WEEKDAY_WDAY_MASK 0x7
 #define MCP794XX_REG_CONTROL           0x07
 #      define MCP794XX_BIT_ALM0_EN     0x10
 #      define MCP794XX_BIT_ALM1_EN     0x20
@@ -877,15 +880,38 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        return 0;
 }
 
+/*
+ * The RTC weekday register may hold an arbitrary value, so derive the alarm
+ * weekday from the current weekday read back from the RTC timekeeping regs.
+ */
+static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm)
+{
+       struct rtc_time tm_now;
+       int days_now, days_alarm, ret;
+
+       ret = ds1307_get_time(dev, &tm_now);
+       if (ret)
+               return ret;
+
+       days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60);
+       days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60);
+
+       return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1;
+}
+
 static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
        struct ds1307 *ds1307 = dev_get_drvdata(dev);
        unsigned char regs[10];
-       int ret;
+       int wday, ret;
 
        if (!test_bit(HAS_ALARM, &ds1307->flags))
                return -EINVAL;
 
+       wday = mcp794xx_alm_weekday(dev, &t->time);
+       if (wday < 0)
+               return wday;
+
        dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
                "enabled=%d pending=%d\n", __func__,
                t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
@@ -902,7 +928,7 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
        regs[3] = bin2bcd(t->time.tm_sec);
        regs[4] = bin2bcd(t->time.tm_min);
        regs[5] = bin2bcd(t->time.tm_hour);
-       regs[6] = bin2bcd(t->time.tm_wday + 1);
+       regs[6] = wday;
        regs[7] = bin2bcd(t->time.tm_mday);
        regs[8] = bin2bcd(t->time.tm_mon + 1);
 
@@ -1354,14 +1380,12 @@ static int ds1307_probe(struct i2c_client *client,
 {
        struct ds1307           *ds1307;
        int                     err = -ENODEV;
-       int                     tmp, wday;
+       int                     tmp;
        const struct chip_desc  *chip;
        bool                    want_irq;
        bool                    ds1307_can_wakeup_device = false;
        unsigned char           regs[8];
        struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
-       struct rtc_time         tm;
-       unsigned long           timestamp;
        u8                      trickle_charger_setup = 0;
 
        ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL);
@@ -1641,25 +1665,6 @@ read_rtc:
                             bin2bcd(tmp));
        }
 
-       /*
-        * Some IPs have weekday reset value = 0x1 which might not correct
-        * hence compute the wday using the current date/month/year values
-        */
-       ds1307_get_time(ds1307->dev, &tm);
-       wday = tm.tm_wday;
-       timestamp = rtc_tm_to_time64(&tm);
-       rtc_time64_to_tm(timestamp, &tm);
-
-       /*
-        * Check if reset wday is different from the computed wday
-        * If different then set the wday which we computed using
-        * timestamp
-        */
-       if (wday != tm.tm_wday)
-               regmap_update_bits(ds1307->regmap, MCP794XX_REG_WEEKDAY,
-                                  MCP794XX_REG_WEEKDAY_WDAY_MASK,
-                                  tm.tm_wday + 1);
-
        if (want_irq || ds1307_can_wakeup_device) {
                device_set_wakeup_capable(ds1307->dev, true);
                set_bit(HAS_ALARM, &ds1307->flags);
index aa0d2c6f1edc235d00d2da56be0d55b9d88ed44a..4d5b007d7fc68cfbe71cc6f4c577b5bc4cc357e3 100644 (file)
@@ -216,9 +216,16 @@ static int ds1390_probe(struct spi_device *spi)
        return res;
 }
 
+static const struct of_device_id ds1390_of_match[] = {
+       { .compatible = "dallas,ds1390" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, ds1390_of_match);
+
 static struct spi_driver ds1390_driver = {
        .driver = {
                .name   = "rtc-ds1390",
+               .of_match_table = of_match_ptr(ds1390_of_match),
        },
        .probe  = ds1390_probe,
 };
index 1b2dcb58c0abf84801a83e2974651f55860ac0d1..1e95312a6f2eecdce510a7cdd79a2b59fa37f037 100644 (file)
@@ -398,42 +398,37 @@ static const struct rtc_class_ops ds1511_rtc_ops = {
        .alarm_irq_enable       = ds1511_rtc_alarm_irq_enable,
 };
 
-static ssize_t
-ds1511_nvram_read(struct file *filp, struct kobject *kobj,
-                 struct bin_attribute *ba,
-                 char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_read(void *priv, unsigned int pos, void *buf,
+                            size_t size)
 {
-       ssize_t count;
+       int i;
 
        rtc_write(pos, DS1511_RAMADDR_LSB);
-       for (count = 0; count < size; count++)
-               *buf++ = rtc_read(DS1511_RAMDATA);
+       for (i = 0; i < size; i++)
+               *(char *)buf++ = rtc_read(DS1511_RAMDATA);
 
-       return count;
+       return 0;
 }
 
-static ssize_t
-ds1511_nvram_write(struct file *filp, struct kobject *kobj,
-                  struct bin_attribute *bin_attr,
-                  char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
+                             size_t size)
 {
-       ssize_t count;
+       int i;
 
        rtc_write(pos, DS1511_RAMADDR_LSB);
-       for (count = 0; count < size; count++)
-               rtc_write(*buf++, DS1511_RAMDATA);
+       for (i = 0; i < size; i++)
+               rtc_write(*(char *)buf++, DS1511_RAMDATA);
 
-       return count;
+       return 0;
 }
 
-static struct bin_attribute ds1511_nvram_attr = {
-       .attr = {
-               .name = "nvram",
-               .mode = S_IRUGO | S_IWUSR,
-       },
+static struct nvmem_config ds1511_nvmem_cfg = {
+       .name = "ds1511_nvram",
+       .word_size = 1,
+       .stride = 1,
        .size = DS1511_RAM_MAX,
-       .read = ds1511_nvram_read,
-       .write = ds1511_nvram_write,
+       .reg_read = ds1511_nvram_read,
+       .reg_write = ds1511_nvram_write,
 };
 
 static int ds1511_rtc_probe(struct platform_device *pdev)
@@ -477,11 +472,20 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
        spin_lock_init(&pdata->lock);
        platform_set_drvdata(pdev, pdata);
 
-       pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-                                             &ds1511_rtc_ops, THIS_MODULE);
+       pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
        if (IS_ERR(pdata->rtc))
                return PTR_ERR(pdata->rtc);
 
+       pdata->rtc->ops = &ds1511_rtc_ops;
+
+       ds1511_nvmem_cfg.priv = &pdev->dev;
+       pdata->rtc->nvmem_config = &ds1511_nvmem_cfg;
+       pdata->rtc->nvram_old_abi = true;
+
+       ret = rtc_register_device(pdata->rtc);
+       if (ret)
+               return ret;
+
        /*
         * if the platform has an interrupt in mind for this device,
         * then by all means, set it
@@ -496,26 +500,6 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
                }
        }
 
-       ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
-       if (ret)
-               dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n",
-                       ds1511_nvram_attr.attr.name);
-
-       return 0;
-}
-
-static int ds1511_rtc_remove(struct platform_device *pdev)
-{
-       struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
-       sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
-       if (pdata->irq > 0) {
-               /*
-                * disable the alarm interrupt
-                */
-               rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
-               rtc_read(RTC_CMD1);
-       }
        return 0;
 }
 
@@ -524,7 +508,6 @@ MODULE_ALIAS("platform:ds1511");
 
 static struct platform_driver ds1511_rtc_driver = {
        .probe          = ds1511_rtc_probe,
-       .remove         = ds1511_rtc_remove,
        .driver         = {
                .name   = "ds1511",
        },
index 64989afffa3daada4b062321c527f18bca142bbb..ff65a7d2b9c9366c689d2efa09b5cc99ec3b5735 100644 (file)
@@ -82,7 +82,7 @@ static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
 static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
 {
        uint32_t ctrl;
-       int timeout = 1000;
+       int timeout = 10000;
 
        do {
                ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
@@ -94,7 +94,7 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
 static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
 {
        uint32_t ctrl;
-       int ret, timeout = 1000;
+       int ret, timeout = 10000;
 
        ret = jz4740_rtc_wait_write_ready(rtc);
        if (ret != 0)
@@ -368,7 +368,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
                ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
                ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0);
                if (ret) {
-                       dev_err(&pdev->dev, "Could not write write to RTC registers\n");
+                       dev_err(&pdev->dev, "Could not write to RTC registers\n");
                        return ret;
                }
        }
index f4c070ea83849a9e67f700519f0f83b8b7541227..c90fba3ed861881c0c813361dfaece9efac60938 100644 (file)
@@ -154,6 +154,8 @@ struct m41t80_data {
        struct rtc_device *rtc;
 #ifdef CONFIG_COMMON_CLK
        struct clk_hw sqw;
+       unsigned long freq;
+       unsigned int sqwe;
 #endif
 };
 
@@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
 #ifdef CONFIG_COMMON_CLK
 #define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
 
-static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
-                                           unsigned long parent_rate)
+static unsigned long m41t80_decode_freq(int setting)
+{
+       return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
+               M41T80_SQW_MAX_FREQ >> setting;
+}
+
+static unsigned long m41t80_get_freq(struct m41t80_data *m41t80)
 {
-       struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
        struct i2c_client *client = m41t80->client;
        int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
                M41T80_REG_WDAY : M41T80_REG_SQW;
        int ret = i2c_smbus_read_byte_data(client, reg_sqw);
-       unsigned long val = M41T80_SQW_MAX_FREQ;
 
        if (ret < 0)
                return 0;
+       return m41t80_decode_freq(ret >> 4);
+}
 
-       ret >>= 4;
-       if (ret == 0)
-               val = 0;
-       else if (ret > 1)
-               val = val / (1 << ret);
-
-       return val;
+static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
+                                           unsigned long parent_rate)
+{
+       return sqw_to_m41t80_data(hw)->freq;
 }
 
 static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long *prate)
 {
-       int i, freq = M41T80_SQW_MAX_FREQ;
-
-       if (freq <= rate)
-               return freq;
-
-       for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) {
-               freq /= 1 << i;
-               if (freq <= rate)
-                       return freq;
-       }
-
-       return 0;
+       if (rate >= M41T80_SQW_MAX_FREQ)
+               return M41T80_SQW_MAX_FREQ;
+       if (rate >= M41T80_SQW_MAX_FREQ / 4)
+               return M41T80_SQW_MAX_FREQ / 4;
+       if (!rate)
+               return 0;
+       return 1 << ilog2(rate);
 }
 
 static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
                M41T80_REG_WDAY : M41T80_REG_SQW;
        int reg, ret, val = 0;
 
-       if (rate) {
-               if (!is_power_of_2(rate))
-                       return -EINVAL;
-               val = ilog2(rate);
-               if (val == ilog2(M41T80_SQW_MAX_FREQ))
-                       val = 1;
-               else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1))
-                       val = ilog2(M41T80_SQW_MAX_FREQ) - val;
-               else
-                       return -EINVAL;
-       }
+       if (rate >= M41T80_SQW_MAX_FREQ)
+               val = 1;
+       else if (rate >= M41T80_SQW_MAX_FREQ / 4)
+               val = 2;
+       else if (rate)
+               val = 15 - ilog2(rate);
 
        reg = i2c_smbus_read_byte_data(client, reg_sqw);
        if (reg < 0)
@@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
        reg = (reg & 0x0f) | (val << 4);
 
        ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
-       if (ret < 0)
-               return ret;
-
-       return -EINVAL;
+       if (!ret)
+               m41t80->freq = m41t80_decode_freq(val);
+       return ret;
 }
 
 static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
@@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
        else
                ret &= ~M41T80_ALMON_SQWE;
 
-       return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+       ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+       if (!ret)
+               m41t80->sqwe = enable;
+       return ret;
 }
 
 static int m41t80_sqw_prepare(struct clk_hw *hw)
@@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw)
 
 static int m41t80_sqw_is_prepared(struct clk_hw *hw)
 {
-       struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
-       struct i2c_client *client = m41t80->client;
-       int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
-
-       if (ret < 0)
-               return ret;
-
-       return !!(ret & M41T80_ALMON_SQWE);
+       return sqw_to_m41t80_data(hw)->sqwe;
 }
 
 static const struct clk_ops m41t80_sqw_ops = {
@@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
        init.parent_names = NULL;
        init.num_parents = 0;
        m41t80->sqw.init = &init;
+       m41t80->freq = m41t80_get_freq(m41t80);
 
        /* optional override of the clockname */
        of_property_read_string(node, "clock-output-names", &init.name);
index 02af045305dd3ce156a7f60b14cc153e548ce60c..d9aea9b6d9cd9189767e8ff074d1c78ffabf9ade 100644 (file)
@@ -163,35 +163,30 @@ static const struct rtc_class_ops m48t86_rtc_ops = {
        .proc           = m48t86_rtc_proc,
 };
 
-static ssize_t m48t86_nvram_read(struct file *filp, struct kobject *kobj,
-                                struct bin_attribute *attr,
-                                char *buf, loff_t off, size_t count)
+static int m48t86_nvram_read(void *priv, unsigned int off, void *buf,
+                            size_t count)
 {
-       struct device *dev = kobj_to_dev(kobj);
+       struct device *dev = priv;
        unsigned int i;
 
        for (i = 0; i < count; i++)
-               buf[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
+               ((u8 *)buf)[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
 
-       return count;
+       return 0;
 }
 
-static ssize_t m48t86_nvram_write(struct file *filp, struct kobject *kobj,
-                                 struct bin_attribute *attr,
-                                 char *buf, loff_t off, size_t count)
+static int m48t86_nvram_write(void *priv, unsigned int off, void *buf,
+                             size_t count)
 {
-       struct device *dev = kobj_to_dev(kobj);
+       struct device *dev = priv;
        unsigned int i;
 
        for (i = 0; i < count; i++)
-               m48t86_writeb(dev, buf[i], M48T86_NVRAM(off + i));
+               m48t86_writeb(dev, ((u8 *)buf)[i], M48T86_NVRAM(off + i));
 
-       return count;
+       return 0;
 }
 
-static BIN_ATTR(nvram, 0644, m48t86_nvram_read, m48t86_nvram_write,
-               M48T86_NVRAM_LEN);
-
 /*
  * The RTC is an optional feature at purchase time on some Technologic Systems
  * boards. Verify that it actually exists by checking if the last two bytes
@@ -223,11 +218,21 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
        return false;
 }
 
+static struct nvmem_config m48t86_nvmem_cfg = {
+       .name = "m48t86_nvram",
+       .word_size = 1,
+       .stride = 1,
+       .size = M48T86_NVRAM_LEN,
+       .reg_read = m48t86_nvram_read,
+       .reg_write = m48t86_nvram_write,
+};
+
 static int m48t86_rtc_probe(struct platform_device *pdev)
 {
        struct m48t86_rtc_info *info;
        struct resource *res;
        unsigned char reg;
+       int err;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
@@ -254,25 +259,25 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       info->rtc = devm_rtc_device_register(&pdev->dev, "m48t86",
-                                            &m48t86_rtc_ops, THIS_MODULE);
+       info->rtc = devm_rtc_allocate_device(&pdev->dev);
        if (IS_ERR(info->rtc))
                return PTR_ERR(info->rtc);
 
+       info->rtc->ops = &m48t86_rtc_ops;
+
+       m48t86_nvmem_cfg.priv = &pdev->dev;
+       info->rtc->nvmem_config = &m48t86_nvmem_cfg;
+       info->rtc->nvram_old_abi = true;
+
+       err = rtc_register_device(info->rtc);
+       if (err)
+               return err;
+
        /* read battery status */
        reg = m48t86_readb(&pdev->dev, M48T86_D);
        dev_info(&pdev->dev, "battery %s\n",
                 (reg & M48T86_D_VRT) ? "ok" : "exhausted");
 
-       if (device_create_bin_file(&pdev->dev, &bin_attr_nvram))
-               dev_err(&pdev->dev, "failed to create nvram sysfs entry\n");
-
-       return 0;
-}
-
-static int m48t86_rtc_remove(struct platform_device *pdev)
-{
-       device_remove_bin_file(&pdev->dev, &bin_attr_nvram);
        return 0;
 }
 
@@ -281,7 +286,6 @@ static struct platform_driver m48t86_rtc_platform_driver = {
                .name   = "rtc-m48t86",
        },
        .probe          = m48t86_rtc_probe,
-       .remove         = m48t86_rtc_remove,
 };
 
 module_platform_driver(m48t86_rtc_platform_driver);
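
With this change the NVRAM is no longer exposed through a hand-rolled sysfs binary attribute: the driver only supplies byte-wide reg_read/reg_write callbacks plus a struct nvmem_config, and the RTC core registers the nvmem device (keeping the old nvram file around via nvram_old_abi). A small userspace sketch of reading that legacy file follows; the exact sysfs path depends on the platform and RTC numbering, so treat it as an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd, i;

	/* assumed path: legacy nvram attribute on the RTC's parent device */
	fd = open("/sys/class/rtc/rtc0/device/nvram", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf));
	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");

	close(fd);
	return 0;
}
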
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
new file mode 100644 (file)
index 0000000..d79b9ae
--- /dev/null
@@ -0,0 +1,422 @@
+/*
+ * Driver for MediaTek SoC based RTC
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define MTK_RTC_DEV KBUILD_MODNAME
+
+#define MTK_RTC_PWRCHK1                0x4
+#define        RTC_PWRCHK1_MAGIC       0xc6
+
+#define MTK_RTC_PWRCHK2                0x8
+#define        RTC_PWRCHK2_MAGIC       0x9a
+
+#define MTK_RTC_KEY            0xc
+#define        RTC_KEY_MAGIC           0x59
+
+#define MTK_RTC_PROT1          0x10
+#define        RTC_PROT1_MAGIC         0xa3
+
+#define MTK_RTC_PROT2          0x14
+#define        RTC_PROT2_MAGIC         0x57
+
+#define MTK_RTC_PROT3          0x18
+#define        RTC_PROT3_MAGIC         0x67
+
+#define MTK_RTC_PROT4          0x1c
+#define        RTC_PROT4_MAGIC         0xd2
+
+#define MTK_RTC_CTL            0x20
+#define        RTC_RC_STOP             BIT(0)
+
+#define MTK_RTC_DEBNCE         0x2c
+#define        RTC_DEBNCE_MASK         GENMASK(2, 0)
+
+#define MTK_RTC_INT            0x30
+#define RTC_INT_AL_STA         BIT(4)
+
+/*
+ * Registers 0x40 to 0x78 hold the RTC time/alarm fields for year, month,
+ * day of month, day of week, hour, minute and second.
+ */
+#define MTK_RTC_TREG(_t, _f)   (0x40 + (0x4 * (_f)) + ((_t) * 0x20))
+
+#define MTK_RTC_AL_CTL         0x7c
+#define        RTC_AL_EN               BIT(0)
+#define        RTC_AL_ALL              GENMASK(7, 0)
+
+/*
+ * Offset used to translate the year between struct rtc_time and the
+ * hardware register MTK_RTC_TREG(x, MTK_YEA).
+ */
+#define MTK_RTC_TM_YR_OFFSET   100
+
+/*
+ * The lowest valid tm_year. The RTC hardware would incorrectly treat
+ * tm_year 100 as a non-leap year, so it also has to be excluded from
+ * the valid range.
+ */
+#define MTK_RTC_TM_YR_L                (MTK_RTC_TM_YR_OFFSET + 1)
+
+/*
+ * The largest year value the MT7622 RTC can hold is 99; the next value
+ * in the year register wraps around to 0.
+ */
+#define MTK_RTC_HW_YR_LIMIT    99
+
+/* The highest value for the valid tm_year */
+#define MTK_RTC_TM_YR_H                (MTK_RTC_TM_YR_OFFSET + MTK_RTC_HW_YR_LIMIT)
+
+/* Simple macro to check whether the hardware supports a given tm_year */
+#define MTK_RTC_TM_YR_VALID(_y)        ((_y) >= MTK_RTC_TM_YR_L && \
+                                (_y) <= MTK_RTC_TM_YR_H)
+
+/* The two functions the RTC provides are a time counter and an alarm. */
+enum {
+       MTK_TC,
+       MTK_AL,
+};
+
+/* Indexes of the time/alarm fields addressed through MTK_RTC_TREG */
+enum {
+       MTK_YEA,
+       MTK_MON,
+       MTK_DOM,
+       MTK_DOW,
+       MTK_HOU,
+       MTK_MIN,
+       MTK_SEC
+};
+
+struct mtk_rtc {
+       struct rtc_device *rtc;
+       void __iomem *base;
+       int irq;
+       struct clk *clk;
+};
+
+static void mtk_w32(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+       writel_relaxed(val, rtc->base + reg);
+}
+
+static u32 mtk_r32(struct mtk_rtc *rtc, u32 reg)
+{
+       return readl_relaxed(rtc->base + reg);
+}
+
+static void mtk_rmw(struct mtk_rtc *rtc, u32 reg, u32 mask, u32 set)
+{
+       u32 val;
+
+       val = mtk_r32(rtc, reg);
+       val &= ~mask;
+       val |= set;
+       mtk_w32(rtc, reg, val);
+}
+
+static void mtk_set(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+       mtk_rmw(rtc, reg, 0, val);
+}
+
+static void mtk_clr(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+       mtk_rmw(rtc, reg, val, 0);
+}
+
+static void mtk_rtc_hw_init(struct mtk_rtc *hw)
+{
+       /* Run the init sequence required to get the RTC working */
+       mtk_w32(hw, MTK_RTC_PWRCHK1, RTC_PWRCHK1_MAGIC);
+       mtk_w32(hw, MTK_RTC_PWRCHK2, RTC_PWRCHK2_MAGIC);
+       mtk_w32(hw, MTK_RTC_KEY, RTC_KEY_MAGIC);
+       mtk_w32(hw, MTK_RTC_PROT1, RTC_PROT1_MAGIC);
+       mtk_w32(hw, MTK_RTC_PROT2, RTC_PROT2_MAGIC);
+       mtk_w32(hw, MTK_RTC_PROT3, RTC_PROT3_MAGIC);
+       mtk_w32(hw, MTK_RTC_PROT4, RTC_PROT4_MAGIC);
+       mtk_rmw(hw, MTK_RTC_DEBNCE, RTC_DEBNCE_MASK, 0);
+       mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+}
+
+static void mtk_rtc_get_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+                                     int time_alarm)
+{
+       u32 year, mon, mday, wday, hour, min, sec;
+
+       /*
+        * Re-read until the seconds field is unchanged, which ensures all
+        * fields are read in a consistent state. Note that MTK_SEC must be
+        * read first: when two consecutive reads of MTK_SEC return the same
+        * value, the other fields are guaranteed not to have changed in
+        * between.
+        */
+       do {
+               sec = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC));
+               min = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN));
+               hour = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU));
+               wday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW));
+               mday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM));
+               mon = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MON));
+               year = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA));
+       } while (sec != mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC)));
+
+       tm->tm_sec  = sec;
+       tm->tm_min  = min;
+       tm->tm_hour = hour;
+       tm->tm_wday = wday;
+       tm->tm_mday = mday;
+       tm->tm_mon  = mon - 1;
+
+       /* Rebase to the absolute year which userspace queries */
+       tm->tm_year = year + MTK_RTC_TM_YR_OFFSET;
+}
+
+static void mtk_rtc_set_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+                                     int time_alarm)
+{
+       u32 year;
+
+       /* Rebase to the relative year which RTC hardware requires */
+       year = tm->tm_year - MTK_RTC_TM_YR_OFFSET;
+
+       mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA), year);
+       mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MON), tm->tm_mon + 1);
+       mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW), tm->tm_wday);
+       mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM), tm->tm_mday);
+       mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU), tm->tm_hour);
+       mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN), tm->tm_min);
+       mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC), tm->tm_sec);
+}
+
+static irqreturn_t mtk_rtc_alarmirq(int irq, void *id)
+{
+       struct mtk_rtc *hw = (struct mtk_rtc *)id;
+       u32 irq_sta;
+
+       irq_sta = mtk_r32(hw, MTK_RTC_INT);
+       if (irq_sta & RTC_INT_AL_STA) {
+               /* Stopping the alarm also implicitly disables the alarm interrupt */
+               mtk_w32(hw, MTK_RTC_AL_CTL, 0);
+               rtc_update_irq(hw->rtc, 1, RTC_IRQF | RTC_AF);
+
+               /* Ack alarm interrupt status */
+               mtk_w32(hw, MTK_RTC_INT, RTC_INT_AL_STA);
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
+
+static int mtk_rtc_gettime(struct device *dev, struct rtc_time *tm)
+{
+       struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+       mtk_rtc_get_alarm_or_time(hw, tm, MTK_TC);
+
+       return rtc_valid_tm(tm);
+}
+
+static int mtk_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+       struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+       if (!MTK_RTC_TM_YR_VALID(tm->tm_year))
+               return -EINVAL;
+
+       /* Stop the time counter before setting the new time */
+       mtk_set(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+       mtk_rtc_set_alarm_or_time(hw, tm, MTK_TC);
+
+       /* Restart the time counter */
+       mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+       return 0;
+}
+
+static int mtk_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+       struct mtk_rtc *hw = dev_get_drvdata(dev);
+       struct rtc_time *alrm_tm = &wkalrm->time;
+
+       mtk_rtc_get_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+       wkalrm->enabled = !!(mtk_r32(hw, MTK_RTC_AL_CTL) & RTC_AL_EN);
+       wkalrm->pending = !!(mtk_r32(hw, MTK_RTC_INT) & RTC_INT_AL_STA);
+
+       return 0;
+}
+
+static int mtk_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+       struct mtk_rtc *hw = dev_get_drvdata(dev);
+       struct rtc_time *alrm_tm = &wkalrm->time;
+
+       if (!MTK_RTC_TM_YR_VALID(alrm_tm->tm_year))
+               return -EINVAL;
+
+       /*
+        * Stop the alarm, which also implicitly disables its interrupt,
+        * before setting a new one.
+        */
+       mtk_clr(hw, MTK_RTC_AL_CTL, RTC_AL_EN);
+
+       /*
+        * Avoid contention between mtk_rtc_setalarm and the IRQ handler by
+        * disabling the interrupt and waiting for any pending IRQ handler
+        * to complete.
+        */
+       synchronize_irq(hw->irq);
+
+       mtk_rtc_set_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+       /* Restart the alarm with the new setup */
+       mtk_w32(hw, MTK_RTC_AL_CTL, RTC_AL_ALL);
+
+       return 0;
+}
+
+static const struct rtc_class_ops mtk_rtc_ops = {
+       .read_time              = mtk_rtc_gettime,
+       .set_time               = mtk_rtc_settime,
+       .read_alarm             = mtk_rtc_getalarm,
+       .set_alarm              = mtk_rtc_setalarm,
+};
+
+static const struct of_device_id mtk_rtc_match[] = {
+       { .compatible = "mediatek,mt7622-rtc" },
+       { .compatible = "mediatek,soc-rtc" },
+       {},
+};
+
+static int mtk_rtc_probe(struct platform_device *pdev)
+{
+       struct mtk_rtc *hw;
+       struct resource *res;
+       int ret;
+
+       hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+       if (!hw)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, hw);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       hw->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(hw->base))
+               return PTR_ERR(hw->base);
+
+       hw->clk = devm_clk_get(&pdev->dev, "rtc");
+       if (IS_ERR(hw->clk)) {
+               dev_err(&pdev->dev, "No clock\n");
+               return PTR_ERR(hw->clk);
+       }
+
+       ret = clk_prepare_enable(hw->clk);
+       if (ret)
+               return ret;
+
+       hw->irq = platform_get_irq(pdev, 0);
+       if (hw->irq < 0) {
+               dev_err(&pdev->dev, "No IRQ resource\n");
+               ret = hw->irq;
+               goto err;
+       }
+
+       ret = devm_request_irq(&pdev->dev, hw->irq, mtk_rtc_alarmirq,
+                              0, dev_name(&pdev->dev), hw);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't request IRQ\n");
+               goto err;
+       }
+
+       mtk_rtc_hw_init(hw);
+
+       device_init_wakeup(&pdev->dev, true);
+
+       hw->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+                                          &mtk_rtc_ops, THIS_MODULE);
+       if (IS_ERR(hw->rtc)) {
+               ret = PTR_ERR(hw->rtc);
+               dev_err(&pdev->dev, "Unable to register device\n");
+               goto err;
+       }
+
+       return 0;
+err:
+       clk_disable_unprepare(hw->clk);
+
+       return ret;
+}
+
+static int mtk_rtc_remove(struct platform_device *pdev)
+{
+       struct mtk_rtc *hw = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(hw->clk);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_rtc_suspend(struct device *dev)
+{
+       struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               enable_irq_wake(hw->irq);
+
+       return 0;
+}
+
+static int mtk_rtc_resume(struct device *dev)
+{
+       struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               disable_irq_wake(hw->irq);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_rtc_pm_ops, mtk_rtc_suspend, mtk_rtc_resume);
+
+#define MTK_RTC_PM_OPS (&mtk_rtc_pm_ops)
+#else  /* !CONFIG_PM_SLEEP */
+#define MTK_RTC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver mtk_rtc_driver = {
+       .probe  = mtk_rtc_probe,
+       .remove = mtk_rtc_remove,
+       .driver = {
+               .name = MTK_RTC_DEV,
+               .of_match_table = mtk_rtc_match,
+               .pm = MTK_RTC_PM_OPS,
+       },
+};
+
+module_platform_driver(mtk_rtc_driver);
+
+MODULE_DESCRIPTION("MediaTek SoC based RTC Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
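
The year handling above keeps only a two-digit year in hardware and rebases it against MTK_RTC_TM_YR_OFFSET, so only tm_year values 101 through 199 are accepted. A stand-alone sketch of that translation and range check follows (constants copied from the driver; tm_year counting from 1900 is the standard struct rtc_time convention).

#include <stdbool.h>
#include <stdio.h>

#define MTK_RTC_TM_YR_OFFSET	100
#define MTK_RTC_HW_YR_LIMIT	99
#define MTK_RTC_TM_YR_L		(MTK_RTC_TM_YR_OFFSET + 1)
#define MTK_RTC_TM_YR_H		(MTK_RTC_TM_YR_OFFSET + MTK_RTC_HW_YR_LIMIT)

/* same range check as MTK_RTC_TM_YR_VALID() */
static bool yr_valid(int tm_year)
{
	return tm_year >= MTK_RTC_TM_YR_L && tm_year <= MTK_RTC_TM_YR_H;
}

int main(void)
{
	/* tm_year counts years since 1900, so 118 is calendar year 2018 */
	int tm_year = 118;
	unsigned int hw_year = tm_year - MTK_RTC_TM_YR_OFFSET;

	printf("tm_year %d (year %d): valid=%d, hardware year register=%u\n",
	       tm_year, 1900 + tm_year, yr_valid(tm_year), hw_year);

	/* tm_year 100 (year 2000) and 200 (year 2100) are both rejected */
	printf("tm_year 100 valid=%d, tm_year 200 valid=%d\n",
	       yr_valid(100), yr_valid(200));
	return 0;
}
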
index 13f7cd11c07eb52948121225242b57f8ab2c8e2c..1d666ac9ef706e083124516f18663a347e9db564 100644 (file)
 #define OMAP_RTC_COMP_MSB_REG          0x50
 #define OMAP_RTC_OSC_REG               0x54
 
+#define OMAP_RTC_SCRATCH0_REG          0x60
+#define OMAP_RTC_SCRATCH1_REG          0x64
+#define OMAP_RTC_SCRATCH2_REG          0x68
+
 #define OMAP_RTC_KICK0_REG             0x6c
 #define OMAP_RTC_KICK1_REG             0x70
 
@@ -667,6 +671,45 @@ static struct pinctrl_desc rtc_pinctrl_desc = {
        .owner = THIS_MODULE,
 };
 
+static int omap_rtc_scratch_read(void *priv, unsigned int offset, void *_val,
+                                size_t bytes)
+{
+       struct omap_rtc *rtc = priv;
+       u32 *val = _val;
+       int i;
+
+       for (i = 0; i < bytes / 4; i++)
+               val[i] = rtc_readl(rtc,
+                                  OMAP_RTC_SCRATCH0_REG + offset + (i * 4));
+
+       return 0;
+}
+
+static int omap_rtc_scratch_write(void *priv, unsigned int offset, void *_val,
+                                 size_t bytes)
+{
+       struct omap_rtc *rtc = priv;
+       u32 *val = _val;
+       int i;
+
+       rtc->type->unlock(rtc);
+       for (i = 0; i < bytes / 4; i++)
+               rtc_writel(rtc,
+                          OMAP_RTC_SCRATCH0_REG + offset + (i * 4), val[i]);
+       rtc->type->lock(rtc);
+
+       return 0;
+}
+
+static struct nvmem_config omap_rtc_nvmem_config = {
+       .name = "omap_rtc_scratch",
+       .word_size = 4,
+       .stride = 4,
+       .size = OMAP_RTC_KICK0_REG - OMAP_RTC_SCRATCH0_REG,
+       .reg_read = omap_rtc_scratch_read,
+       .reg_write = omap_rtc_scratch_write,
+};
+
 static int omap_rtc_probe(struct platform_device *pdev)
 {
        struct omap_rtc *rtc;
@@ -797,13 +840,16 @@ static int omap_rtc_probe(struct platform_device *pdev)
 
        device_init_wakeup(&pdev->dev, true);
 
-       rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-                       &omap_rtc_ops, THIS_MODULE);
+       rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
        if (IS_ERR(rtc->rtc)) {
                ret = PTR_ERR(rtc->rtc);
                goto err;
        }
 
+       rtc->rtc->ops = &omap_rtc_ops;
+       omap_rtc_nvmem_config.priv = rtc;
+       rtc->rtc->nvmem_config = &omap_rtc_nvmem_config;
+
        /* handle periodic and alarm irqs */
        ret = devm_request_irq(&pdev->dev, rtc->irq_timer, rtc_irq, 0,
                        dev_name(&rtc->rtc->dev), rtc);
@@ -830,9 +876,14 @@ static int omap_rtc_probe(struct platform_device *pdev)
        rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
        if (IS_ERR(rtc->pctldev)) {
                dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
-               return PTR_ERR(rtc->pctldev);
+               ret = PTR_ERR(rtc->pctldev);
+               goto err;
        }
 
+       ret = rtc_register_device(rtc->rtc);
+       if (ret)
+               goto err;
+
        return 0;
 
 err:
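
The scratch area exported here spans the registers between OMAP_RTC_SCRATCH0_REG and OMAP_RTC_KICK0_REG, i.e. 0x6c - 0x60 = 12 bytes or three 32-bit words, which is why word_size and stride are both 4 and the callbacks step through the area one word at a time. A small stand-alone sketch of that offset arithmetic (the actual register accessors are stubbed out):

#include <stdio.h>

#define OMAP_RTC_SCRATCH0_REG	0x60
#define OMAP_RTC_KICK0_REG	0x6c

int main(void)
{
	unsigned int offset = 4;	/* start at the second scratch word */
	size_t bytes = 8;		/* transfer two 32-bit words */
	size_t size = OMAP_RTC_KICK0_REG - OMAP_RTC_SCRATCH0_REG;
	size_t i;

	printf("nvmem size: %zu bytes (%zu scratch registers)\n",
	       size, size / 4);

	/* same register addressing as omap_rtc_scratch_read/write */
	for (i = 0; i < bytes / 4; i++)
		printf("word %zu -> register 0x%02x\n", i,
		       (unsigned int)(OMAP_RTC_SCRATCH0_REG + offset + i * 4));
	return 0;
}
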
index 28c48b3c1946dc8856b7c00a090ba132755dddc7..c312af0db72957af5ed4980663fce838ab7cec1c 100644 (file)
@@ -35,6 +35,9 @@
 #define REG_MONTHS   0x08
 #define REG_YEARS    0x09
 
+#define REG_OFFSET   0x0e
+#define REG_OFFSET_MODE BIT(7)
+
 struct pcf8523 {
        struct rtc_device *rtc;
 };
@@ -272,10 +275,47 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
 #define pcf8523_rtc_ioctl NULL
 #endif
 
+static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       int err;
+       u8 value;
+       s8 val;
+
+       err = pcf8523_read(client, REG_OFFSET, &value);
+       if (err < 0)
+               return err;
+
+       /* sign extend the 7-bit offset value */
+       val = value << 1;
+       *offset = (value & REG_OFFSET_MODE ? 4069 : 4340) * (val >> 1);
+
+       return 0;
+}
+
+static int pcf8523_rtc_set_offset(struct device *dev, long offset)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       long reg_m0, reg_m1;
+       u8 value;
+
+       reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L);
+       reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L);
+
+       if (abs(reg_m0 * 4340 - offset) < abs(reg_m1 * 4069 - offset))
+               value = reg_m0 & 0x7f;
+       else
+               value = (reg_m1 & 0x7f) | REG_OFFSET_MODE;
+
+       return pcf8523_write(client, REG_OFFSET, value);
+}
+
 static const struct rtc_class_ops pcf8523_rtc_ops = {
        .read_time = pcf8523_rtc_read_time,
        .set_time = pcf8523_rtc_set_time,
        .ioctl = pcf8523_rtc_ioctl,
+       .read_offset = pcf8523_rtc_read_offset,
+       .set_offset = pcf8523_rtc_set_offset,
 };
 
 static int pcf8523_probe(struct i2c_client *client,
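
pcf8523_rtc_set_offset picks whichever of the two offset modes approximates the requested correction with the smaller error: 4340 and 4069 are the per-LSB step sizes the driver uses for mode 0 and mode 1, in the same unit as the requested offset (assumed here to be parts per billion, as with the RTC offset interface). A stand-alone worked example of that selection, with local stand-ins for the kernel's clamp() and DIV_ROUND_CLOSEST():

#include <stdio.h>
#include <stdlib.h>

/* DIV_ROUND_CLOSEST equivalent for the value ranges used here */
static long div_round_closest(long x, long d)
{
	return (x >= 0 ? x + d / 2 : x - d / 2) / d;
}

static long clamp_l(long v, long lo, long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	long offset = 10000;	/* requested correction */
	long reg_m0 = clamp_l(div_round_closest(offset, 4340), -64, 63);
	long reg_m1 = clamp_l(div_round_closest(offset, 4069), -64, 63);

	if (labs(reg_m0 * 4340 - offset) < labs(reg_m1 * 4069 - offset))
		printf("mode 0: value %ld, applied %ld\n", reg_m0, reg_m0 * 4340);
	else
		printf("mode 1: value %ld, applied %ld\n", reg_m1, reg_m1 * 4069);
	return 0;
}

For offset = 10000 this picks mode 0 with register value 2, an applied correction of 8680, which is closer to the request than mode 1's 8138.
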
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
new file mode 100644 (file)
index 0000000..ea04e9f
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * drivers/rtc/rtc-pcf85363.c
+ *
+ * Driver for NXP PCF85363 real-time clock.
+ *
+ * Copyright (C) 2017 Eric Nelson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based loosely on rtc-8583 by Russell King, Wolfram Sang and Juergen Beisert
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/bcd.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/*
+ * Date/Time registers
+ */
+#define DT_100THS      0x00
+#define DT_SECS                0x01
+#define DT_MINUTES     0x02
+#define DT_HOURS       0x03
+#define DT_DAYS                0x04
+#define DT_WEEKDAYS    0x05
+#define DT_MONTHS      0x06
+#define DT_YEARS       0x07
+
+/*
+ * Alarm registers
+ */
+#define DT_SECOND_ALM1 0x08
+#define DT_MINUTE_ALM1 0x09
+#define DT_HOUR_ALM1   0x0a
+#define DT_DAY_ALM1    0x0b
+#define DT_MONTH_ALM1  0x0c
+#define DT_MINUTE_ALM2 0x0d
+#define DT_HOUR_ALM2   0x0e
+#define DT_WEEKDAY_ALM2        0x0f
+#define DT_ALARM_EN    0x10
+
+/*
+ * Time stamp registers
+ */
+#define DT_TIMESTAMP1  0x11
+#define DT_TIMESTAMP2  0x17
+#define DT_TIMESTAMP3  0x1d
+#define DT_TS_MODE     0x23
+
+/*
+ * control registers
+ */
+#define CTRL_OFFSET    0x24
+#define CTRL_OSCILLATOR        0x25
+#define CTRL_BATTERY   0x26
+#define CTRL_PIN_IO    0x27
+#define CTRL_FUNCTION  0x28
+#define CTRL_INTA_EN   0x29
+#define CTRL_INTB_EN   0x2a
+#define CTRL_FLAGS     0x2b
+#define CTRL_RAMBYTE   0x2c
+#define CTRL_WDOG      0x2d
+#define CTRL_STOP_EN   0x2e
+#define CTRL_RESETS    0x2f
+#define CTRL_RAM       0x40
+
+#define NVRAM_SIZE     0x40
+
+static struct i2c_driver pcf85363_driver;
+
+struct pcf85363 {
+       struct device           *dev;
+       struct rtc_device       *rtc;
+       struct nvmem_config     nvmem_cfg;
+       struct regmap           *regmap;
+};
+
+static int pcf85363_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+       unsigned char buf[DT_YEARS + 1];
+       int ret, len = sizeof(buf);
+
+       /* read the RTC date and time registers all at once */
+       ret = regmap_bulk_read(pcf85363->regmap, DT_100THS, buf, len);
+       if (ret) {
+               dev_err(dev, "%s: error %d\n", __func__, ret);
+               return ret;
+       }
+
+       tm->tm_year = bcd2bin(buf[DT_YEARS]);
+       /* adjust for 1900 base of rtc_time */
+       tm->tm_year += 100;
+
+       tm->tm_wday = buf[DT_WEEKDAYS] & 7;
+       buf[DT_SECS] &= 0x7F;
+       tm->tm_sec = bcd2bin(buf[DT_SECS]);
+       buf[DT_MINUTES] &= 0x7F;
+       tm->tm_min = bcd2bin(buf[DT_MINUTES]);
+       tm->tm_hour = bcd2bin(buf[DT_HOURS]);
+       tm->tm_mday = bcd2bin(buf[DT_DAYS]);
+       tm->tm_mon = bcd2bin(buf[DT_MONTHS]) - 1;
+
+       return 0;
+}
+
+static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+       unsigned char buf[DT_YEARS + 1];
+       int len = sizeof(buf);
+
+       buf[DT_100THS] = 0;
+       buf[DT_SECS] = bin2bcd(tm->tm_sec);
+       buf[DT_MINUTES] = bin2bcd(tm->tm_min);
+       buf[DT_HOURS] = bin2bcd(tm->tm_hour);
+       buf[DT_DAYS] = bin2bcd(tm->tm_mday);
+       buf[DT_WEEKDAYS] = tm->tm_wday;
+       buf[DT_MONTHS] = bin2bcd(tm->tm_mon + 1);
+       buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
+
+       return regmap_bulk_write(pcf85363->regmap, DT_100THS,
+                                buf, len);
+}
+
+static const struct rtc_class_ops rtc_ops = {
+       .read_time      = pcf85363_rtc_read_time,
+       .set_time       = pcf85363_rtc_set_time,
+};
+
+static int pcf85363_nvram_read(void *priv, unsigned int offset, void *val,
+                              size_t bytes)
+{
+       struct pcf85363 *pcf85363 = priv;
+
+       return regmap_bulk_read(pcf85363->regmap, CTRL_RAM + offset,
+                               val, bytes);
+}
+
+static int pcf85363_nvram_write(void *priv, unsigned int offset, void *val,
+                               size_t bytes)
+{
+       struct pcf85363 *pcf85363 = priv;
+
+       return regmap_bulk_write(pcf85363->regmap, CTRL_RAM + offset,
+                                val, bytes);
+}
+
+static const struct regmap_config regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+};
+
+static int pcf85363_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
+{
+       struct pcf85363 *pcf85363;
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               return -ENODEV;
+
+       pcf85363 = devm_kzalloc(&client->dev, sizeof(struct pcf85363),
+                               GFP_KERNEL);
+       if (!pcf85363)
+               return -ENOMEM;
+
+       pcf85363->regmap = devm_regmap_init_i2c(client, &regmap_config);
+       if (IS_ERR(pcf85363->regmap)) {
+               dev_err(&client->dev, "regmap allocation failed\n");
+               return PTR_ERR(pcf85363->regmap);
+       }
+
+       pcf85363->dev = &client->dev;
+       i2c_set_clientdata(client, pcf85363);
+
+       pcf85363->rtc = devm_rtc_allocate_device(pcf85363->dev);
+       if (IS_ERR(pcf85363->rtc))
+               return PTR_ERR(pcf85363->rtc);
+
+       pcf85363->nvmem_cfg.name = "pcf85363-";
+       pcf85363->nvmem_cfg.word_size = 1;
+       pcf85363->nvmem_cfg.stride = 1;
+       pcf85363->nvmem_cfg.size = NVRAM_SIZE;
+       pcf85363->nvmem_cfg.reg_read = pcf85363_nvram_read;
+       pcf85363->nvmem_cfg.reg_write = pcf85363_nvram_write;
+       pcf85363->nvmem_cfg.priv = pcf85363;
+       pcf85363->rtc->nvmem_config = &pcf85363->nvmem_cfg;
+       pcf85363->rtc->ops = &rtc_ops;
+
+       return rtc_register_device(pcf85363->rtc);
+}
+
+static const struct of_device_id dev_ids[] = {
+       { .compatible = "nxp,pcf85363" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver pcf85363_driver = {
+       .driver = {
+               .name   = "pcf85363",
+               .of_match_table = of_match_ptr(dev_ids),
+       },
+       .probe  = pcf85363_probe,
+};
+
+module_i2c_driver(pcf85363_driver);
+
+MODULE_AUTHOR("Eric Nelson");
+MODULE_DESCRIPTION("pcf85363 I2C RTC driver");
+MODULE_LICENSE("GPL");
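
pcf85363_rtc_read_time relies on DT_100THS being register 0, so a single regmap_bulk_read fills a buffer whose indexes coincide with the register offsets, and the fields are then BCD-decoded (with the top bit of the seconds register masked off, as the driver does). A minimal sketch of that decode with a local bcd2bin() stand-in and an invented register snapshot:

#include <stdio.h>

/* two-digit BCD to binary, stand-in for the kernel's bcd2bin() */
static unsigned int bcd2bin(unsigned char v)
{
	return (v >> 4) * 10 + (v & 0x0f);
}

int main(void)
{
	/* example register snapshot: 12:34:56 on 2017-11-30, in BCD */
	unsigned char secs = 0x56, mins = 0x34, hours = 0x12;
	unsigned char days = 0x30, months = 0x11, years = 0x17;

	/* mask bit 7 of the seconds register, matching the driver */
	secs &= 0x7f;

	printf("%04u-%02u-%02u %02u:%02u:%02u\n",
	       2000 + bcd2bin(years), bcd2bin(months), bcd2bin(days),
	       bcd2bin(hours), bcd2bin(mins), bcd2bin(secs));
	return 0;
}
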
index cea6ea4df970ff44925008b643e894300d3adeaa..3efc86c25d27a5754031f95136522766e67cd563 100644 (file)
@@ -387,7 +387,7 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
        if (err)
                return err;
 
-       return pcf8563_set_alarm_mode(client, 1);
+       return pcf8563_set_alarm_mode(client, !!tm->enabled);
 }
 
 static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
@@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
                return 0;
 
        buf &= PCF8563_REG_CLKO_F_MASK;
-       return clkout_rates[ret];
+       return clkout_rates[buf];
 }
 
 static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
index e1687e19c59f4ad57e6dd78d5f7e8702fa411d27..82eb7da2c47835b3e5515e48bc081ada07e892de 100644 (file)
@@ -308,10 +308,9 @@ static int pl031_remove(struct amba_device *adev)
 
        dev_pm_clear_wake_irq(&adev->dev);
        device_init_wakeup(&adev->dev, false);
-       free_irq(adev->irq[0], ldata);
+       if (adev->irq[0])
+               free_irq(adev->irq[0], ldata);
        rtc_device_unregister(ldata->rtc);
-       iounmap(ldata->base);
-       kfree(ldata);
        amba_release_regions(adev);
 
        return 0;
@@ -322,25 +321,28 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
        int ret;
        struct pl031_local *ldata;
        struct pl031_vendor_data *vendor = id->data;
-       struct rtc_class_ops *ops = &vendor->ops;
+       struct rtc_class_ops *ops;
        unsigned long time, data;
 
        ret = amba_request_regions(adev, NULL);
        if (ret)
                goto err_req;
 
-       ldata = kzalloc(sizeof(struct pl031_local), GFP_KERNEL);
-       if (!ldata) {
+       ldata = devm_kzalloc(&adev->dev, sizeof(struct pl031_local),
+                            GFP_KERNEL);
+       ops = devm_kmemdup(&adev->dev, &vendor->ops, sizeof(vendor->ops),
+                          GFP_KERNEL);
+       if (!ldata || !ops) {
                ret = -ENOMEM;
                goto out;
        }
-       ldata->vendor = vendor;
-
-       ldata->base = ioremap(adev->res.start, resource_size(&adev->res));
 
+       ldata->vendor = vendor;
+       ldata->base = devm_ioremap(&adev->dev, adev->res.start,
+                                  resource_size(&adev->res));
        if (!ldata->base) {
                ret = -ENOMEM;
-               goto out_no_remap;
+               goto out;
        }
 
        amba_set_drvdata(adev, ldata);
@@ -373,28 +375,32 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
                }
        }
 
+       if (!adev->irq[0]) {
+               /* When there's no interrupt, no point in exposing the alarm */
+               ops->read_alarm = NULL;
+               ops->set_alarm = NULL;
+               ops->alarm_irq_enable = NULL;
+       }
+
        device_init_wakeup(&adev->dev, true);
        ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
                                        THIS_MODULE);
        if (IS_ERR(ldata->rtc)) {
                ret = PTR_ERR(ldata->rtc);
-               goto out_no_rtc;
+               goto out;
        }
 
-       if (request_irq(adev->irq[0], pl031_interrupt,
-                       vendor->irqflags, "rtc-pl031", ldata)) {
-               ret = -EIO;
-               goto out_no_irq;
+       if (adev->irq[0]) {
+               ret = request_irq(adev->irq[0], pl031_interrupt,
+                                 vendor->irqflags, "rtc-pl031", ldata);
+               if (ret)
+                       goto out_no_irq;
+               dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
        }
-       dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
        return 0;
 
 out_no_irq:
        rtc_device_unregister(ldata->rtc);
-out_no_rtc:
-       iounmap(ldata->base);
-out_no_remap:
-       kfree(ldata);
 out:
        amba_release_regions(adev);
 err_req:
@@ -446,7 +452,7 @@ static struct pl031_vendor_data stv2_pl031 = {
        .irqflags = IRQF_SHARED | IRQF_COND_SUSPEND,
 };
 
-static struct amba_id pl031_ids[] = {
+static const struct amba_id pl031_ids[] = {
        {
                .id = 0x00041031,
                .mask = 0x000fffff,
index aa09771de04f764cff457180ef090bb2f052d632..3d6174eb32f6a3b09920c7a9eb370fcd1fc95c89 100644 (file)
@@ -282,13 +282,13 @@ static int rv3029_eeprom_read(struct device *dev, u8 reg,
 static int rv3029_eeprom_write(struct device *dev, u8 reg,
                               u8 const buf[], size_t len)
 {
-       int ret, err;
+       int ret;
        size_t i;
        u8 tmp;
 
-       err = rv3029_eeprom_enter(dev);
-       if (err < 0)
-               return err;
+       ret = rv3029_eeprom_enter(dev);
+       if (ret < 0)
+               return ret;
 
        for (i = 0; i < len; i++, reg++) {
                ret = rv3029_read_regs(dev, reg, &tmp, 1);
@@ -304,11 +304,11 @@ static int rv3029_eeprom_write(struct device *dev, u8 reg,
                        break;
        }
 
-       err = rv3029_eeprom_exit(dev);
-       if (err < 0)
-               return err;
+       ret = rv3029_eeprom_exit(dev);
+       if (ret < 0)
+               return ret;
 
-       return ret;
+       return 0;
 }
 
 static int rv3029_eeprom_update_bits(struct device *dev,
@@ -876,6 +876,8 @@ static const struct i2c_device_id rv3029_id[] = {
 MODULE_DEVICE_TABLE(i2c, rv3029_id);
 
 static const struct of_device_id rv3029_of_match[] = {
+       { .compatible = "microcrystal,rv3029" },
+       /* Backward compatibility only, do not use compatibles below: */
        { .compatible = "rv3029" },
        { .compatible = "rv3029c2" },
        { .compatible = "mc,rv3029c2" },
index 1ed3403ff8ac23ae1c9c2923b76f3f8aff1c9c11..5c5938ab3d86bcb0bef62e9425553cce800b9d2b 100644 (file)
@@ -24,7 +24,6 @@
 #define RX8010_MDAY    0x14
 #define RX8010_MONTH   0x15
 #define RX8010_YEAR    0x16
-#define RX8010_YEAR    0x16
 #define RX8010_RESV17  0x17
 #define RX8010_ALMIN   0x18
 #define RX8010_ALHOUR  0x19
@@ -36,7 +35,7 @@
 #define RX8010_CTRL    0x1F
 /* 0x20 to 0x2F are user registers */
 #define RX8010_RESV30  0x30
-#define RX8010_RESV31  0x32
+#define RX8010_RESV31  0x31
 #define RX8010_IRQ     0x32
 
 #define RX8010_EXT_WADA  BIT(3)
@@ -248,7 +247,7 @@ static int rx8010_init_client(struct i2c_client *client)
 
        rx8010->ctrlreg = (ctrl[1] & ~RX8010_CTRL_TEST);
 
-       return err;
+       return 0;
 }
 
 static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -277,7 +276,7 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
        t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
 
-       return err;
+       return 0;
 }
 
 static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
new file mode 100644 (file)
index 0000000..d544d52
--- /dev/null
@@ -0,0 +1,662 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define SPRD_RTC_SEC_CNT_VALUE         0x0
+#define SPRD_RTC_MIN_CNT_VALUE         0x4
+#define SPRD_RTC_HOUR_CNT_VALUE                0x8
+#define SPRD_RTC_DAY_CNT_VALUE         0xc
+#define SPRD_RTC_SEC_CNT_UPD           0x10
+#define SPRD_RTC_MIN_CNT_UPD           0x14
+#define SPRD_RTC_HOUR_CNT_UPD          0x18
+#define SPRD_RTC_DAY_CNT_UPD           0x1c
+#define SPRD_RTC_SEC_ALM_UPD           0x20
+#define SPRD_RTC_MIN_ALM_UPD           0x24
+#define SPRD_RTC_HOUR_ALM_UPD          0x28
+#define SPRD_RTC_DAY_ALM_UPD           0x2c
+#define SPRD_RTC_INT_EN                        0x30
+#define SPRD_RTC_INT_RAW_STS           0x34
+#define SPRD_RTC_INT_CLR               0x38
+#define SPRD_RTC_INT_MASK_STS          0x3C
+#define SPRD_RTC_SEC_ALM_VALUE         0x40
+#define SPRD_RTC_MIN_ALM_VALUE         0x44
+#define SPRD_RTC_HOUR_ALM_VALUE                0x48
+#define SPRD_RTC_DAY_ALM_VALUE         0x4c
+#define SPRD_RTC_SPG_VALUE             0x50
+#define SPRD_RTC_SPG_UPD               0x54
+#define SPRD_RTC_SEC_AUXALM_UPD                0x60
+#define SPRD_RTC_MIN_AUXALM_UPD                0x64
+#define SPRD_RTC_HOUR_AUXALM_UPD       0x68
+#define SPRD_RTC_DAY_AUXALM_UPD                0x6c
+
+/* BIT & MASK definition for SPRD_RTC_INT_* registers */
+#define SPRD_RTC_SEC_EN                        BIT(0)
+#define SPRD_RTC_MIN_EN                        BIT(1)
+#define SPRD_RTC_HOUR_EN               BIT(2)
+#define SPRD_RTC_DAY_EN                        BIT(3)
+#define SPRD_RTC_ALARM_EN              BIT(4)
+#define SPRD_RTC_HRS_FORMAT_EN         BIT(5)
+#define SPRD_RTC_AUXALM_EN             BIT(6)
+#define SPRD_RTC_SPG_UPD_EN            BIT(7)
+#define SPRD_RTC_SEC_UPD_EN            BIT(8)
+#define SPRD_RTC_MIN_UPD_EN            BIT(9)
+#define SPRD_RTC_HOUR_UPD_EN           BIT(10)
+#define SPRD_RTC_DAY_UPD_EN            BIT(11)
+#define SPRD_RTC_ALMSEC_UPD_EN         BIT(12)
+#define SPRD_RTC_ALMMIN_UPD_EN         BIT(13)
+#define SPRD_RTC_ALMHOUR_UPD_EN                BIT(14)
+#define SPRD_RTC_ALMDAY_UPD_EN         BIT(15)
+#define SPRD_RTC_INT_MASK              GENMASK(15, 0)
+
+#define SPRD_RTC_TIME_INT_MASK                         \
+       (SPRD_RTC_SEC_UPD_EN | SPRD_RTC_MIN_UPD_EN |    \
+        SPRD_RTC_HOUR_UPD_EN | SPRD_RTC_DAY_UPD_EN)
+
+#define SPRD_RTC_ALMTIME_INT_MASK                              \
+       (SPRD_RTC_ALMSEC_UPD_EN | SPRD_RTC_ALMMIN_UPD_EN |      \
+        SPRD_RTC_ALMHOUR_UPD_EN | SPRD_RTC_ALMDAY_UPD_EN)
+
+#define SPRD_RTC_ALM_INT_MASK                  \
+       (SPRD_RTC_SEC_EN | SPRD_RTC_MIN_EN |    \
+        SPRD_RTC_HOUR_EN | SPRD_RTC_DAY_EN |   \
+        SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN)
+
+/* second/minute/hour/day values mask definition */
+#define SPRD_RTC_SEC_MASK              GENMASK(5, 0)
+#define SPRD_RTC_MIN_MASK              GENMASK(5, 0)
+#define SPRD_RTC_HOUR_MASK             GENMASK(4, 0)
+#define SPRD_RTC_DAY_MASK              GENMASK(15, 0)
+
+/* alarm lock definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_ALMLOCK_MASK          GENMASK(7, 0)
+#define SPRD_RTC_ALM_UNLOCK            0xa5
+#define SPRD_RTC_ALM_LOCK              (~SPRD_RTC_ALM_UNLOCK & \
+                                        SPRD_RTC_ALMLOCK_MASK)
+
+/* SPG values definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_POWEROFF_ALM_FLAG     BIT(8)
+#define SPRD_RTC_POWER_RESET_FLAG      BIT(9)
+
+/* timeout of synchronizing time and alarm registers (us) */
+#define SPRD_RTC_POLL_TIMEOUT          200000
+#define SPRD_RTC_POLL_DELAY_US         20000
+
+struct sprd_rtc {
+       struct rtc_device       *rtc;
+       struct regmap           *regmap;
+       struct device           *dev;
+       u32                     base;
+       int                     irq;
+       bool                    valid;
+};
+
+/*
+ * The Spreadtrum RTC controller has 3 groups of registers: time, normal
+ * alarm and auxiliary alarm. The time group sets the RTC time, the normal
+ * alarm group sets the normal alarm, and the auxiliary alarm group sets
+ * the auxiliary alarm. Both the normal and the auxiliary alarm event can
+ * wake the system from deep sleep, but only the normal alarm event can
+ * power the system up from the powered-down state.
+ */
+enum sprd_rtc_reg_types {
+       SPRD_RTC_TIME,
+       SPRD_RTC_ALARM,
+       SPRD_RTC_AUX_ALARM,
+};
+
+static int sprd_rtc_clear_alarm_ints(struct sprd_rtc *rtc)
+{
+       return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+                           SPRD_RTC_ALM_INT_MASK);
+}
+
+static int sprd_rtc_disable_ints(struct sprd_rtc *rtc)
+{
+       int ret;
+
+       ret = regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+                                SPRD_RTC_INT_MASK, 0);
+       if (ret)
+               return ret;
+
+       return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+                           SPRD_RTC_INT_MASK);
+}
+
+static int sprd_rtc_lock_alarm(struct sprd_rtc *rtc, bool lock)
+{
+       int ret;
+       u32 val;
+
+       ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+       if (ret)
+               return ret;
+
+       val &= ~(SPRD_RTC_ALMLOCK_MASK | SPRD_RTC_POWEROFF_ALM_FLAG);
+       if (lock)
+               val |= SPRD_RTC_ALM_LOCK;
+       else
+               val |= SPRD_RTC_ALM_UNLOCK | SPRD_RTC_POWEROFF_ALM_FLAG;
+
+       ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_SPG_UPD, val);
+       if (ret)
+               return ret;
+
+       /* wait until the SPG value is updated successfully */
+       ret = regmap_read_poll_timeout(rtc->regmap,
+                                      rtc->base + SPRD_RTC_INT_RAW_STS, val,
+                                      (val & SPRD_RTC_SPG_UPD_EN),
+                                      SPRD_RTC_POLL_DELAY_US,
+                                      SPRD_RTC_POLL_TIMEOUT);
+       if (ret) {
+               dev_err(rtc->dev, "failed to update SPG value:%d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int sprd_rtc_get_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+                            time64_t *secs)
+{
+       u32 sec_reg, min_reg, hour_reg, day_reg;
+       u32 val, sec, min, hour, day;
+       int ret;
+
+       switch (type) {
+       case SPRD_RTC_TIME:
+               sec_reg = SPRD_RTC_SEC_CNT_VALUE;
+               min_reg = SPRD_RTC_MIN_CNT_VALUE;
+               hour_reg = SPRD_RTC_HOUR_CNT_VALUE;
+               day_reg = SPRD_RTC_DAY_CNT_VALUE;
+               break;
+       case SPRD_RTC_ALARM:
+               sec_reg = SPRD_RTC_SEC_ALM_VALUE;
+               min_reg = SPRD_RTC_MIN_ALM_VALUE;
+               hour_reg = SPRD_RTC_HOUR_ALM_VALUE;
+               day_reg = SPRD_RTC_DAY_ALM_VALUE;
+               break;
+       case SPRD_RTC_AUX_ALARM:
+               sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+               min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+               hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+               day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = regmap_read(rtc->regmap, rtc->base + sec_reg, &val);
+       if (ret)
+               return ret;
+
+       sec = val & SPRD_RTC_SEC_MASK;
+
+       ret = regmap_read(rtc->regmap, rtc->base + min_reg, &val);
+       if (ret)
+               return ret;
+
+       min = val & SPRD_RTC_MIN_MASK;
+
+       ret = regmap_read(rtc->regmap, rtc->base + hour_reg, &val);
+       if (ret)
+               return ret;
+
+       hour = val & SPRD_RTC_HOUR_MASK;
+
+       ret = regmap_read(rtc->regmap, rtc->base + day_reg, &val);
+       if (ret)
+               return ret;
+
+       day = val & SPRD_RTC_DAY_MASK;
+       *secs = (((time64_t)(day * 24) + hour) * 60 + min) * 60 + sec;
+       return 0;
+}
+
+static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+                            time64_t secs)
+{
+       u32 sec_reg, min_reg, hour_reg, day_reg, sts_mask;
+       u32 sec, min, hour, day, val;
+       int ret, rem;
+
+       /* convert seconds to RTC time format */
+       day = div_s64_rem(secs, 86400, &rem);
+       hour = rem / 3600;
+       rem -= hour * 3600;
+       min = rem / 60;
+       sec = rem - min * 60;
+
+       switch (type) {
+       case SPRD_RTC_TIME:
+               sec_reg = SPRD_RTC_SEC_CNT_UPD;
+               min_reg = SPRD_RTC_MIN_CNT_UPD;
+               hour_reg = SPRD_RTC_HOUR_CNT_UPD;
+               day_reg = SPRD_RTC_DAY_CNT_UPD;
+               sts_mask = SPRD_RTC_TIME_INT_MASK;
+               break;
+       case SPRD_RTC_ALARM:
+               sec_reg = SPRD_RTC_SEC_ALM_UPD;
+               min_reg = SPRD_RTC_MIN_ALM_UPD;
+               hour_reg = SPRD_RTC_HOUR_ALM_UPD;
+               day_reg = SPRD_RTC_DAY_ALM_UPD;
+               sts_mask = SPRD_RTC_ALMTIME_INT_MASK;
+               break;
+       case SPRD_RTC_AUX_ALARM:
+               sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+               min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+               hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+               day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+               sts_mask = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = regmap_write(rtc->regmap, rtc->base + sec_reg, sec);
+       if (ret)
+               return ret;
+
+       ret = regmap_write(rtc->regmap, rtc->base + min_reg, min);
+       if (ret)
+               return ret;
+
+       ret = regmap_write(rtc->regmap, rtc->base + hour_reg, hour);
+       if (ret)
+               return ret;
+
+       ret = regmap_write(rtc->regmap, rtc->base + day_reg, day);
+       if (ret)
+               return ret;
+
+       if (type == SPRD_RTC_AUX_ALARM)
+               return 0;
+
+       /*
+        * The time and normal alarm registers live in the always-powered-on
+        * region supplied by VDDRTC, so updating them takes a long time,
+        * about 125ms. Wait here until all values have been updated
+        * successfully.
+        */
+       ret = regmap_read_poll_timeout(rtc->regmap,
+                                      rtc->base + SPRD_RTC_INT_RAW_STS, val,
+                                      ((val & sts_mask) == sts_mask),
+                                      SPRD_RTC_POLL_DELAY_US,
+                                      SPRD_RTC_POLL_TIMEOUT);
+       if (ret < 0) {
+               dev_err(rtc->dev, "set time/alarm values timeout\n");
+               return ret;
+       }
+
+       return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+                           sts_mask);
+}
+
+static int sprd_rtc_read_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct sprd_rtc *rtc = dev_get_drvdata(dev);
+       time64_t secs;
+       u32 val;
+       int ret;
+
+       ret = sprd_rtc_get_secs(rtc, SPRD_RTC_AUX_ALARM, &secs);
+       if (ret)
+               return ret;
+
+       rtc_time64_to_tm(secs, &alrm->time);
+
+       ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+       if (ret)
+               return ret;
+
+       alrm->enabled = !!(val & SPRD_RTC_AUXALM_EN);
+
+       ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+       if (ret)
+               return ret;
+
+       alrm->pending = !!(val & SPRD_RTC_AUXALM_EN);
+       return 0;
+}
+
+static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct sprd_rtc *rtc = dev_get_drvdata(dev);
+       time64_t secs = rtc_tm_to_time64(&alrm->time);
+       int ret;
+
+       /* clear the auxiliary alarm interrupt status */
+       ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+                          SPRD_RTC_AUXALM_EN);
+       if (ret)
+               return ret;
+
+       ret = sprd_rtc_set_secs(rtc, SPRD_RTC_AUX_ALARM, secs);
+       if (ret)
+               return ret;
+
+       if (alrm->enabled) {
+               ret = regmap_update_bits(rtc->regmap,
+                                        rtc->base + SPRD_RTC_INT_EN,
+                                        SPRD_RTC_AUXALM_EN,
+                                        SPRD_RTC_AUXALM_EN);
+       } else {
+               ret = regmap_update_bits(rtc->regmap,
+                                        rtc->base + SPRD_RTC_INT_EN,
+                                        SPRD_RTC_AUXALM_EN, 0);
+       }
+
+       return ret;
+}
+
+static int sprd_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct sprd_rtc *rtc = dev_get_drvdata(dev);
+       time64_t secs;
+       int ret;
+
+       if (!rtc->valid) {
+               dev_warn(dev, "RTC values are invalid\n");
+               return -EINVAL;
+       }
+
+       ret = sprd_rtc_get_secs(rtc, SPRD_RTC_TIME, &secs);
+       if (ret)
+               return ret;
+
+       rtc_time64_to_tm(secs, tm);
+       return rtc_valid_tm(tm);
+}
+
+static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct sprd_rtc *rtc = dev_get_drvdata(dev);
+       time64_t secs = rtc_tm_to_time64(tm);
+       u32 val;
+       int ret;
+
+       ret = sprd_rtc_set_secs(rtc, SPRD_RTC_TIME, secs);
+       if (ret)
+               return ret;
+
+       if (!rtc->valid) {
+               /*
+                * Set SPRD_RTC_POWER_RESET_FLAG to indicate now RTC has valid
+                * time values.
+                */
+               ret = regmap_update_bits(rtc->regmap,
+                                        rtc->base + SPRD_RTC_SPG_UPD,
+                                        SPRD_RTC_POWER_RESET_FLAG,
+                                        SPRD_RTC_POWER_RESET_FLAG);
+               if (ret)
+                       return ret;
+
+               ret = regmap_read_poll_timeout(rtc->regmap,
+                                              rtc->base + SPRD_RTC_INT_RAW_STS,
+                                              val, (val & SPRD_RTC_SPG_UPD_EN),
+                                              SPRD_RTC_POLL_DELAY_US,
+                                              SPRD_RTC_POLL_TIMEOUT);
+               if (ret) {
+                       dev_err(rtc->dev, "failed to update SPG value:%d\n",
+                               ret);
+                       return ret;
+               }
+
+               rtc->valid = true;
+       }
+
+       return 0;
+}
+
+static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct sprd_rtc *rtc = dev_get_drvdata(dev);
+       time64_t secs;
+       int ret;
+       u32 val;
+
+       /*
+        * If aie_timer is enabled, we should get the normal alarm time.
+        * Otherwise we should get auxiliary alarm time.
+        */
+       if (rtc->rtc && rtc->rtc->aie_timer.enabled == 0)
+               return sprd_rtc_read_aux_alarm(dev, alrm);
+
+       ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs);
+       if (ret)
+               return ret;
+
+       rtc_time64_to_tm(secs, &alrm->time);
+
+       ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+       if (ret)
+               return ret;
+
+       alrm->enabled = !!(val & SPRD_RTC_ALARM_EN);
+
+       ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+       if (ret)
+               return ret;
+
+       alrm->pending = !!(val & SPRD_RTC_ALARM_EN);
+       return 0;
+}
+
+static int sprd_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct sprd_rtc *rtc = dev_get_drvdata(dev);
+       time64_t secs = rtc_tm_to_time64(&alrm->time);
+       struct rtc_time aie_time =
+               rtc_ktime_to_tm(rtc->rtc->aie_timer.node.expires);
+       int ret;
+
+       /*
+        * There are 2 groups of alarms: the normal alarm and the auxiliary
+        * alarm. Both alarm events can wake the system from deep sleep, but
+        * only the normal alarm event can power the system up from the
+        * powered-down state. Moreover, updating the auxiliary alarm
+        * registers does not require the ~125ms status polling. So the
+        * auxiliary alarm is usually set to wake the system from deep sleep,
+        * while the normal alarm (with status polling) is used for the other
+        * scenarios.
+        *
+        * So check whether the alarm time was set by the aie_timer: if it
+        * was, set the normal alarm; if not, it is just a wake event, so set
+        * the auxiliary alarm.
+        */
+       if (!rtc->rtc->aie_timer.enabled || rtc_tm_sub(&aie_time, &alrm->time))
+               return sprd_rtc_set_aux_alarm(dev, alrm);
+
+       /* clear the alarm interrupt status first */
+       ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+                          SPRD_RTC_ALARM_EN);
+       if (ret)
+               return ret;
+
+       ret = sprd_rtc_set_secs(rtc, SPRD_RTC_ALARM, secs);
+       if (ret)
+               return ret;
+
+       if (alrm->enabled) {
+               ret = regmap_update_bits(rtc->regmap,
+                                        rtc->base + SPRD_RTC_INT_EN,
+                                        SPRD_RTC_ALARM_EN,
+                                        SPRD_RTC_ALARM_EN);
+               if (ret)
+                       return ret;
+
+               /* unlock the alarm to enable the alarm function. */
+               ret = sprd_rtc_lock_alarm(rtc, false);
+       } else {
+               regmap_update_bits(rtc->regmap,
+                                  rtc->base + SPRD_RTC_INT_EN,
+                                  SPRD_RTC_ALARM_EN, 0);
+
+               /*
+                * Lock the alarm function so that a spurious alarm event
+                * cannot power up the system.
+                */
+               ret = sprd_rtc_lock_alarm(rtc, true);
+       }
+
+       return ret;
+}
+
+static int sprd_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct sprd_rtc *rtc = dev_get_drvdata(dev);
+       int ret;
+
+       if (enabled) {
+               ret = regmap_update_bits(rtc->regmap,
+                                        rtc->base + SPRD_RTC_INT_EN,
+                                        SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN,
+                                        SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN);
+               if (ret)
+                       return ret;
+
+               ret = sprd_rtc_lock_alarm(rtc, false);
+       } else {
+               regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+                                  SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN, 0);
+
+               ret = sprd_rtc_lock_alarm(rtc, true);
+       }
+
+       return ret;
+}
+
+static const struct rtc_class_ops sprd_rtc_ops = {
+       .read_time = sprd_rtc_read_time,
+       .set_time = sprd_rtc_set_time,
+       .read_alarm = sprd_rtc_read_alarm,
+       .set_alarm = sprd_rtc_set_alarm,
+       .alarm_irq_enable = sprd_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t sprd_rtc_handler(int irq, void *dev_id)
+{
+       struct sprd_rtc *rtc = dev_id;
+       int ret;
+
+       ret = sprd_rtc_clear_alarm_ints(rtc);
+       if (ret)
+               return IRQ_RETVAL(ret);
+
+       rtc_update_irq(rtc->rtc, 1, RTC_AF | RTC_IRQF);
+       return IRQ_HANDLED;
+}
+
+static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
+{
+       u32 val;
+       int ret;
+
+       ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+       if (ret)
+               return ret;
+
+       /*
+        * If SPRD_RTC_POWER_RESET_FLAG is not set, the RTC has been powered
+        * down, so its time values are invalid.
+        */
+       rtc->valid = (val & SPRD_RTC_POWER_RESET_FLAG) ? true : false;
+       return 0;
+}
+
+static int sprd_rtc_probe(struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       struct sprd_rtc *rtc;
+       int ret;
+
+       rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+       if (!rtc)
+               return -ENOMEM;
+
+       rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!rtc->regmap)
+               return -ENODEV;
+
+       ret = of_property_read_u32(node, "reg", &rtc->base);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to get RTC base address\n");
+               return ret;
+       }
+
+       rtc->irq = platform_get_irq(pdev, 0);
+       if (rtc->irq < 0) {
+               dev_err(&pdev->dev, "failed to get RTC irq number\n");
+               return rtc->irq;
+       }
+
+       rtc->dev = &pdev->dev;
+       platform_set_drvdata(pdev, rtc);
+
+       /* clear and disable all RTC interrupts */
+       ret = sprd_rtc_disable_ints(rtc);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to disable RTC interrupts\n");
+               return ret;
+       }
+
+       /* check if RTC time values are valid */
+       ret = sprd_rtc_check_power_down(rtc);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to check RTC time values\n");
+               return ret;
+       }
+
+       ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+                                       sprd_rtc_handler,
+                                       IRQF_ONESHOT | IRQF_EARLY_RESUME,
+                                       pdev->name, rtc);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to request RTC irq\n");
+               return ret;
+       }
+
+       rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+                                           &sprd_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtc->rtc))
+               return PTR_ERR(rtc->rtc);
+
+       device_init_wakeup(&pdev->dev, 1);
+       return 0;
+}
+
+static int sprd_rtc_remove(struct platform_device *pdev)
+{
+       device_init_wakeup(&pdev->dev, 0);
+       return 0;
+}
+
+static const struct of_device_id sprd_rtc_of_match[] = {
+       { .compatible = "sprd,sc2731-rtc", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, sprd_rtc_of_match);
+
+static struct platform_driver sprd_rtc_driver = {
+       .driver = {
+               .name = "sprd-rtc",
+               .of_match_table = sprd_rtc_of_match,
+       },
+       .probe  = sprd_rtc_probe,
+       .remove = sprd_rtc_remove,
+};
+module_platform_driver(sprd_rtc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum RTC Device Driver");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
index e364550eb9a7fad8ef4efed7ca2f200116832e9c..92ff2edb86a653afecda26f6ec9422715fb78e79 100644 (file)
@@ -72,9 +72,10 @@ since_epoch_show(struct device *dev, struct device_attribute *attr, char *buf)
 
        retval = rtc_read_time(to_rtc_device(dev), &tm);
        if (retval == 0) {
-               unsigned long time;
-               rtc_tm_to_time(&tm, &time);
-               retval = sprintf(buf, "%lu\n", time);
+               time64_t time;
+
+               time = rtc_tm_to_time64(&tm);
+               retval = sprintf(buf, "%lld\n", time);
        }
 
        return retval;
@@ -132,7 +133,7 @@ static ssize_t
 wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        ssize_t retval;
-       unsigned long alarm;
+       time64_t alarm;
        struct rtc_wkalrm alm;
 
        /* Don't show disabled alarms.  For uniformity, RTC alarms are
@@ -145,8 +146,8 @@ wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
         */
        retval = rtc_read_alarm(to_rtc_device(dev), &alm);
        if (retval == 0 && alm.enabled) {
-               rtc_tm_to_time(&alm.time, &alarm);
-               retval = sprintf(buf, "%lu\n", alarm);
+               alarm = rtc_tm_to_time64(&alm.time);
+               retval = sprintf(buf, "%lld\n", alarm);
        }
 
        return retval;
@@ -157,8 +158,8 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t n)
 {
        ssize_t retval;
-       unsigned long now, alarm;
-       unsigned long push = 0;
+       time64_t now, alarm;
+       time64_t push = 0;
        struct rtc_wkalrm alm;
        struct rtc_device *rtc = to_rtc_device(dev);
        const char *buf_ptr;
@@ -170,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
        retval = rtc_read_time(rtc, &alm.time);
        if (retval < 0)
                return retval;
-       rtc_tm_to_time(&alm.time, &now);
+       now = rtc_tm_to_time64(&alm.time);
 
        buf_ptr = buf;
        if (*buf_ptr == '+') {
@@ -181,7 +182,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
                } else
                        adjust = 1;
        }
-       retval = kstrtoul(buf_ptr, 0, &alarm);
+       retval = kstrtos64(buf_ptr, 0, &alarm);
        if (retval)
                return retval;
        if (adjust) {
@@ -197,7 +198,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
                        return retval;
                if (alm.enabled) {
                        if (push) {
-                               rtc_tm_to_time(&alm.time, &push);
+                               push = rtc_tm_to_time64(&alm.time);
                                alarm += push;
                        } else
                                return -EBUSY;
@@ -212,7 +213,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
                 */
                alarm = now + 300;
        }
-       rtc_time_to_tm(alarm, &alm.time);
+       rtc_time64_to_tm(alarm, &alm.time);
 
        retval = rtc_set_alarm(rtc, &alm);
        return (retval < 0) ? retval : n;
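For reference, and not part of the patch: the hunks above move the sysfs code from the 32-bit rtc_tm_to_time()/rtc_time_to_tm() helpers to their time64_t counterparts. A minimal standalone sketch of that round trip, using an arbitrary example seconds value, looks like this.

/* Sketch only: y2038-safe conversion between struct rtc_time and a
 * 64-bit seconds count; the value below is a made-up example.
 */
#include <linux/rtc.h>
#include <linux/printk.h>

static void example_time64_roundtrip(void)
{
	struct rtc_time tm;
	time64_t secs = 4102444800LL;	/* 2100-01-01 00:00:00 UTC, beyond y2038 */

	rtc_time64_to_tm(secs, &tm);	/* seconds -> broken-down time */
	secs = rtc_tm_to_time64(&tm);	/* broken-down time -> seconds */

	pr_info("round-tripped %lld seconds since the epoch\n", secs);
}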
index 65b432a096fe22ffc489f876c355f05654315bb4..0c34d3b81279e535bbe027e77bce04443bd5b05a 100644 (file)
@@ -52,6 +52,7 @@ struct xgene_rtc_dev {
        void __iomem *csr_base;
        struct clk *clk;
        unsigned int irq_wake;
+       unsigned int irq_enabled;
 };
 
 static int xgene_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -104,15 +105,19 @@ static int xgene_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
        return 0;
 }
 
+static int xgene_rtc_alarm_irq_enabled(struct device *dev)
+{
+       struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
+
+       return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 1 : 0;
+}
+
 static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
        struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
-       unsigned long rtc_time;
        unsigned long alarm_time;
 
-       rtc_time = readl(pdata->csr_base + RTC_CCVR);
        rtc_tm_to_time(&alrm->time, &alarm_time);
-
        pdata->alarm_time = alarm_time;
        writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR);
 
@@ -180,12 +185,18 @@ static int xgene_rtc_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "Couldn't get the clock for RTC\n");
                return -ENODEV;
        }
-       clk_prepare_enable(pdata->clk);
+       ret = clk_prepare_enable(pdata->clk);
+       if (ret)
+               return ret;
 
        /* Turn on the clock and the crystal */
        writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
 
-       device_init_wakeup(&pdev->dev, 1);
+       ret = device_init_wakeup(&pdev->dev, 1);
+       if (ret) {
+               clk_disable_unprepare(pdata->clk);
+               return ret;
+       }
 
        pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
                                         &xgene_rtc_ops, THIS_MODULE);
@@ -210,45 +221,55 @@ static int xgene_rtc_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int xgene_rtc_suspend(struct device *dev)
+static int __maybe_unused xgene_rtc_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
        int irq;
 
        irq = platform_get_irq(pdev, 0);
+
+       /*
+        * If the RTC alarm will be used to wake the system, leave it
+        * enabled. Otherwise disable the alarm and prepare for suspend.
+        */
        if (device_may_wakeup(&pdev->dev)) {
                if (!enable_irq_wake(irq))
                        pdata->irq_wake = 1;
        } else {
+               pdata->irq_enabled = xgene_rtc_alarm_irq_enabled(dev);
                xgene_rtc_alarm_irq_enable(dev, 0);
-               clk_disable(pdata->clk);
+               clk_disable_unprepare(pdata->clk);
        }
-
        return 0;
 }
 
-static int xgene_rtc_resume(struct device *dev)
+static int __maybe_unused xgene_rtc_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
        int irq;
+       int rc;
 
        irq = platform_get_irq(pdev, 0);
+
        if (device_may_wakeup(&pdev->dev)) {
                if (pdata->irq_wake) {
                        disable_irq_wake(irq);
                        pdata->irq_wake = 0;
                }
        } else {
-               clk_enable(pdata->clk);
-               xgene_rtc_alarm_irq_enable(dev, 1);
+               rc = clk_prepare_enable(pdata->clk);
+               if (rc) {
+                       dev_err(dev, "Unable to enable clock error %d\n", rc);
+                       return rc;
+               }
+               xgene_rtc_alarm_irq_enable(dev, pdata->irq_enabled);
        }
 
        return 0;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(xgene_rtc_pm_ops, xgene_rtc_suspend, xgene_rtc_resume);
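For reference, and not part of the patch: the #ifdef CONFIG_PM_SLEEP guards above are dropped in favour of the __maybe_unused / SIMPLE_DEV_PM_OPS pairing. A minimal sketch of that idiom for a hypothetical "foo" platform driver:

/* Sketch only: PM callbacks marked __maybe_unused and wired up through
 * SIMPLE_DEV_PM_OPS, so no preprocessor guards are needed around them.
 * All "foo" names are hypothetical.
 */
#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the (hypothetical) hardware here */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* bring the (hypothetical) hardware back up here */
	return 0;
}

/* With CONFIG_PM_SLEEP disabled, SIMPLE_DEV_PM_OPS leaves the callbacks
 * unreferenced; __maybe_unused keeps the compiler from warning about them.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);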
 
index 29f35e29d4801f83aa74b0a590bdb4412a9caa7c..66e008f7adb6c890a43199168dd118a814489797 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
@@ -70,8 +71,8 @@ static void do_restore_device(struct work_struct *);
 static void do_reload_device(struct work_struct *);
 static void do_requeue_requests(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
-static void dasd_device_timeout(unsigned long);
-static void dasd_block_timeout(unsigned long);
+static void dasd_device_timeout(struct timer_list *);
+static void dasd_block_timeout(struct timer_list *);
 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
 static void dasd_profile_init(struct dasd_profile *, struct dentry *);
 static void dasd_profile_exit(struct dasd_profile *);
@@ -119,9 +120,7 @@ struct dasd_device *dasd_alloc_device(void)
                     (void (*)(unsigned long)) dasd_device_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
-       init_timer(&device->timer);
-       device->timer.function = dasd_device_timeout;
-       device->timer.data = (unsigned long) device;
+       timer_setup(&device->timer, dasd_device_timeout, 0);
        INIT_WORK(&device->kick_work, do_kick_device);
        INIT_WORK(&device->restore_device, do_restore_device);
        INIT_WORK(&device->reload_device, do_reload_device);
@@ -163,9 +162,7 @@ struct dasd_block *dasd_alloc_block(void)
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
-       init_timer(&block->timer);
-       block->timer.function = dasd_block_timeout;
-       block->timer.data = (unsigned long) block;
+       timer_setup(&block->timer, dasd_block_timeout, 0);
        spin_lock_init(&block->profile.lock);
 
        return block;
@@ -1560,12 +1557,12 @@ EXPORT_SYMBOL(dasd_start_IO);
  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
  * DASD_CQR_QUEUED for 2) and 3).
  */
-static void dasd_device_timeout(unsigned long ptr)
+static void dasd_device_timeout(struct timer_list *t)
 {
        unsigned long flags;
        struct dasd_device *device;
 
-       device = (struct dasd_device *) ptr;
+       device = from_timer(device, t, timer);
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        /* re-activate request queue */
        dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
@@ -2628,12 +2625,12 @@ EXPORT_SYMBOL(dasd_cancel_req);
  * is waiting for something that may not come reliably, (e.g. a state
  * change interrupt)
  */
-static void dasd_block_timeout(unsigned long ptr)
+static void dasd_block_timeout(struct timer_list *t)
 {
        unsigned long flags;
        struct dasd_block *block;
 
-       block = (struct dasd_block *) ptr;
+       block = from_timer(block, t, timer);
        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
        /* re-activate request queue */
        dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
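The same timer conversion recurs throughout this series (dasd above, and the fsm, arcmsr, fas216 and bfad hunks further down): the open-coded init_timer()/.function/.data setup becomes timer_setup(), and the callback recovers its context with from_timer(). As a standalone reference, not part of the patch and using a hypothetical struct foo, the idiom is:

/* Sketch only: the timer_setup()/from_timer() pattern. "struct foo" and
 * its fields are made up for illustration.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list timer;
	int expired;
};

static void foo_timeout(struct timer_list *t)
{
	/* Recover the enclosing structure from the timer_list pointer. */
	struct foo *f = from_timer(f, t, timer);

	f->expired = 1;
}

static void foo_start(struct foo *f)
{
	f->expired = 0;
	timer_setup(&f->timer, foo_timeout, 0);
	mod_timer(&f->timer, jiffies + HZ);	/* fire in one second */
}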
index c95a4784c1911ef1540450f9fc6c2ffee97fcee5..e7cd28ff1984460540f70fd203326b08f2d5c8f9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
index 98fb28e49d2c0605c5ea3e2062d4ad790788d504..f035c2f25d35a8c5aa12cb0552aaf3484edb2798 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Based on.......: linux/drivers/s390/block/mdisk.c
index 8eafcd5fa0049ed9d3384aa6a8999fcec4b61ba2..1a41ef49633875a08ba2021ffa729a39881ec102 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                 Horst Hummel <Horst.Hummel@de.ibm.com>
index 6168ccdb389c37bb686196f49d68f0780b8fd010..a6b132f7e869eb4eb804b3fa8407cd064c92b699 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
index 7abb240847c07dd0b24f3f2e7f03d221a1416f5f..6aaefb78043696e658e36b6637b4e6dde59b5c83 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * dcssblk.c -- the S/390 block driver for dcss memory
  *
index eb51893c74a4ba4053fe8d15e064fbf42bed9845..b4130c7880d874862f14eeb381f36c472b231a0d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Block driver for s390 storage class memory.
  *
index 571a0709e1e5b98ba14708d13e9f944e5ad85a6a..2a6334ca750efdf68f818df0af4b08ac66b8df78 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Xpram.c -- the S/390 expanded memory RAM-disk
  *           
index c4518168fd02c98013b349e17fdba30c8d65eec5..61822480a2a0bdfa808e4f9d3e19965857e7700d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IBM/3270 Driver - fullscreen driver.
  *
index 251a318a9b7541452c0142f0f0f8ce84167b8dc2..1447d08872253e3498914fb6da6c3504cd207f47 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    HMC Drive DVD Module
  *
index 027ac6ae5eea512c530a9afbb87bb31ad2bedd8e..bf4ab4efed7355dd88007c7bfc304f5251530e9a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Character device driver for reading z/VM *MONITOR service records.
  *
index 571a7e3527553ad905612007b860197ca4105b5a..76c158c41510374ac4b814aca55587193c8b8fe7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Character device driver for writing z/VM *MONITOR service records.
  *
index 5d4f053d7c38c330d969586fa3eae0b40f6955ca..f8cd2935fbfd48c5aef1ad980457cc55433b6db4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IBM/3270 Driver - core functions.
  *
index 9b4c61c1e3097e8888e9e4b6fbacad76e30df2b8..e4e2df7a478e36aba8ee63775195b4c97886ead8 100644 (file)
@@ -158,7 +158,7 @@ static inline void
 __sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
 {
        del_timer(&sclp_request_timer);
-       sclp_request_timer.function = (TIMER_FUNC_TYPE)cb;
+       sclp_request_timer.function = cb;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
 }
@@ -566,7 +566,7 @@ sclp_sync_wait(void)
                if (timer_pending(&sclp_request_timer) &&
                    get_tod_clock_fast() > timeout &&
                    del_timer(&sclp_request_timer))
-                       sclp_request_timer.function((TIMER_DATA_TYPE)&sclp_request_timer);
+                       sclp_request_timer.function(&sclp_request_timer);
                cpu_relax();
        }
        local_irq_disable();
index 19c25427f27fdd702864153fe64f71abb2a175b0..ee6f3b563728319ba5c3d4964f05843453e3ce99 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Enable Asynchronous Notification via SCLP.
  *
index de69f0ddc321dedbb7270ae9fcdf75afe1d148fc..6d73ee3f827a6ca401b0eaa5e4f66b2e6e1766e5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    tape device discipline for 3480/3490 tapes.
  *
index e352047ed9f7a8d6d8ec0a70a688c1efadbfdf9d..37e65a05517f50606f73db539e0871e76452d142 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    tape device discipline for 3590 tapes.
  *
index e7d23048d3f00d0ea1d2a59bf1128d38f4cb6d1d..a07102472ce97eba06a526dcb56d5690b9be2fd4 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2004
  *
index 32503a60ee851698049c2fc1221ce01c581ebb6a..8d3370da2dfc294e1286caa337bd9d305fb624c5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    basic function of the tape device driver
  *
index e417ccd9e299891560b2b2c1e67565f0eb46df47..1c98023cffd4165a8ad5117c907fa9258d918c4f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    IBM/3270 Driver - tty functions.
  *
index 62559dc0169f8c9f32a4677e946e0e88880ae17f..069b9ef08206b1bc7168bdbfd4dd3de2ba026e6c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *     character device driver for reading z/VM system service records
  *
index fa90ef05afc00c32805238c4dccdb1a5c589390d..52aa894243187484c03bf301d274990cdbeacb32 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux driver for System z and s390 unit record devices
  * (z/VM virtual punch, reader, printer)
index aaed778f67c4ab84bf13dc9435260a5bdae361dc..4369662cfff5a7ad094d522590901bc845933872 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * zcore module to export memory content and register sets for creating system
  * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
@@ -7,7 +8,6 @@
  *
  * Copyright IBM Corp. 2003, 2008
  * Author(s): Michael Holzheu
- * License: GPL
  */
 
 #define KMSG_COMPONENT "zdump"
index e2f7b6e93efddf85dd45457d70ce3c3bc3602ba3..bfec1485ca2332ac5bfe8adf2e7c6c50307a3c97 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  bus driver for ccwgroup
  *
index f4166f80c4d4e4408c97e8742a7275b90464898d..5c94a3aec4dd293dfdce50e33219037564ebf118 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 1999, 2010
  *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
index 7b0b295b2313b8f3056378ed64c4249381a0213b..c08fc5a8df0c61935c02e282a6ec868d2d0e7630 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   S/390 common I/O routines -- channel subsystem call
  *
index 8e7e19b9e92c028e1097fe9ff82386203594bc37..0015729d917d90e049a1def14cba883ec2c31cc3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for s390 chsc subchannels
  *
index 89216174fcbba8f83d3a0a79a633f6e1cb91ad88..987bf9a8c9f7237d06c578e42807a6e168ef7dfd 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *   S/390 common I/O routines -- low level i/o calls
  *
index 7d59230e88bb3a2452e8e4eaf667f25659d3d13d..5e495c62cfa7749aef468cc137d0d5eab0c959c7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Linux on zSeries Channel Measurement Facility support
  *
@@ -7,20 +8,6 @@
  *         Cornelia Huck <cornelia.huck@de.ibm.com>
  *
  * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "cio"
index d3e504c3c362655f4eec5903893d8bc8f8e5af2c..0f11dce6e2240c14151ab690fd28e1a39c0694a2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * driver for channel subsystem
  *
@@ -5,8 +6,6 @@
  *
  * Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *           Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
index 318d8269f5dee10c56114224b4a08b8b617a96da..75a245f38e2eb7558b9da624a1e2f190cd77b8c2 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  *  bus driver for ccw devices
  *
@@ -5,8 +6,6 @@
  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *              Cornelia Huck (cornelia.huck@de.ibm.com)
  *              Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
index dd7d79d30edc440662a02432a3ad3ce822503225..1319122e9d1231920ef0325a9e56a4c6de91ff80 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * finite state machine for device handling
  *
index cf8c4ac6323a6d1c91dfe93dfdb22e2d9d0432b3..1caf6a398760bb1f156f5c088759f12e6039e589 100644 (file)
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * Copyright IBM Corp. 2002, 2009
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  *           Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
  */
 #include <linux/export.h>
 #include <linux/init.h>
index ce16e4f45d440fd25538d9223db5e07566ad22f5..53468ae64b999fa17bc154bb7eeda57293aed2da 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for s390 eadm subchannels
  *
index c592087be0f1a6b0b8083dce448e278c2388c7c9..77fde9f5ea8baeb55ff403096358d20ff7974c5e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Functions for registration of I/O interruption subclasses on s390.
  *
index ed4852fab44b5737fa5edae05ddd640067486304..59b4a3370cd5d454cadc9e145bcb3a2dc9a321df 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Linux for s390 qdio support, buffer handling, qdio API and module support.
  *
index 9ae1380cbc31300f5e251f03e6027ad903b2d666..98f3cfdc0d027dd0c0e7bcd8d05be86c0fc3a09d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * qdio queue initialization
  *
index 1fa53ecdc2aaa2ec1a81b7bf65b5d0dcf32a16c2..6bca1d5455d4f6ce1997d39d09792a90e65511a0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Recognize and maintain s390 storage class memory.
  *
index 82f05c4b8c526f73a52aed819eacb33cdfd18e3a..ea6a2d0b2894decac95c3421c544183ee89c3383 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * VFIO based Physical Subchannel device driver
  *
index faeba9db3d95999526fdf2ab0667751cd82ab1e0..48d55dc9e98648738b78f3dbc311ea3e141573cd 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright IBM Corp. 2006, 2012
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
  *           Holger Dengler <hd@linux.vnet.ibm.com>
  *
  * Adjunct processor bus.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "ap"
index 7e45c4d08cad40e9124913abd79b715312cbd990..e0827eaa42f1dda711ed08fbf8d61dc96b65630f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright IBM Corp. 2006, 2012
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -7,20 +8,6 @@
  *           Holger Dengler <hd@linux.vnet.ibm.com>
  *
  * Adjunct processor bus header file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _AP_BUS_H_
index 8dda5bb34a2f2710c6d0f8fc40b291d3e848d99e..e7c2e4f9529ac6bab55a8df8f854c8ed64442cdc 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  pkey device driver
  *
  *  Copyright IBM Corp. 2017
  *  Author(s): Harald Freudenberger
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  */
 
 #define KMSG_COMPONENT "pkey"
index b5f4006198b9e0d977b04c2c08d7626c48056569..ce15f101ee282701cdf55ac0ddba557001c737fc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
@@ -218,8 +205,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
        weight += atomic_read(&zq->load);
        pref_weight += atomic_read(&pref_zq->load);
        if (weight == pref_weight)
-               return &zq->queue->total_request_count >
-                       &pref_zq->queue->total_request_count;
+               return zq->queue->total_request_count >
+                       pref_zq->queue->total_request_count;
        return weight > pref_weight;
 }
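Not part of the patch, but to make the one-character fix above concrete: with the ampersands, the comparison is between the addresses of the two counters rather than their values, so the queue selection was effectively arbitrary. A hypothetical miniature of the bug class:

/* Sketch only: comparing values versus comparing their addresses.
 * "struct q" is hypothetical.
 */
struct q {
	unsigned long total_request_count;
};

static int busier(const struct q *a, const struct q *b)
{
	/* Correct: compares the request counts themselves. */
	return a->total_request_count > b->total_request_count;

	/* The variant the hunk removes amounts to:
	 *   return &a->total_request_count > &b->total_request_count;
	 * which compares where the counters live in memory, not their values.
	 */
}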
 
index 73541a798db7a4a1c41bd0e555f33bd1a1bbbbde..9fff8912f6e3b05bd7b2c76a529e5a99d6aec863 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_API_H_
index f85dacf1c28442a38dac33978a20d412ceaf8c42..233e1e695208b9b870edb4259127c41e2ee3357a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
index 12cff6262566b5f4c1960b5497e37da7025ad7ad..011d61d8a4ae5869e7d41d1654a218c5f53f96bc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_CCA_KEY_H_
index b97c5d5ee5a4aba9e70f88f791b674046092a6f7..e701194d36115c06d4435df2b1dcc4dbc9cc103e 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index 0dce4b9af184114ecdc13bf3741ab167dfb402d8..c3c116777c937cd4fafbe974f765cd5725327f27 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_CEX2A_H_
index e2eebc775a37a0e80ced5520cb5446d85e33fb23..f305538334adb14f0dd296fd2466cb478a2ba3ba 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  Copyright IBM Corp. 2012
  *  Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
index 13df60209ed33a05604e34e381d40a50b1c40904..01598d83c60a0a1c478c1ade86ea603feb05eab0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -7,20 +8,6 @@
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_ERROR_H_
index db5bde47dfb0d17b49a8dad9eeec8279c2a6e226..afe1b2bcd7ecf5e211712b567ca186f3a32e4a57 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "zcrypt"
index 5cc280318ee705e9e64fe035b9ccb32c8aa82a40..0a36545cfb8eeb09da1f2136ba31adde5b54775d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_MSGTYPE50_H_
index 785620d3050433e33a2af975595d6969e067e45e..f54bef4a928e90b34e7158d97f1d1db998617bf3 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT "zcrypt"
index 7a0d5b57821f07868c9af78da9873c1eca997c2e..d314f4525518b63693d70df42e768a8dc9e8af33 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_MSGTYPE6_H_
index 600604782b65e972705d01984568134949e61039..159b0a0dd211b9a561cad4d326c5d481b1a8418d 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -9,20 +10,6 @@
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index eacafc8962f204377d2c95f8d9a5bfd032d16eb2..d678a3af83a7baa9dda7b1dfdcba632014b975a1 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
@@ -8,20 +9,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef _ZCRYPT_PCIXCC_H_
index 4742be0eec24f8ca87787105eada3974ab61e4ab..720434e18007e3a8e1c9e5228c4841ba0cb782a5 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  zcrypt 2.1.0
  *
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *                               Ralph Wuerthner <rwuerthn@de.ibm.com>
  *  MSGTYPE restruct:            Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
index be9f172185310ac081d6a44f0e061868bc5885d3..7ce98b70cad38bf55be1fd4a15bdaa62761ff159 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright IBM Corp. 2001, 2009
  * Author(s):
index 8c14c6c3ad3d65fa5f0524d49720c1d3321246f3..eb07862bd36a03f5d043ee1fb492461695a506ee 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /**
  * A generic FSM based on fsm used in isdn4linux
  *
@@ -129,8 +130,9 @@ fsm_getstate_str(fsm_instance *fi)
 }
 
 static void
-fsm_expire_timer(fsm_timer *this)
+fsm_expire_timer(struct timer_list *t)
 {
+       fsm_timer *this = from_timer(this, t, tl);
 #if FSM_TIMER_DEBUG
        printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
               this->fi->name, this);
@@ -142,13 +144,11 @@ void
 fsm_settimer(fsm_instance *fi, fsm_timer *this)
 {
        this->fi = fi;
-       this->tl.function = (void *)fsm_expire_timer;
-       this->tl.data = (long)this;
 #if FSM_TIMER_DEBUG
        printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
               this);
 #endif
-       init_timer(&this->tl);
+       timer_setup(&this->tl, fsm_expire_timer, 0);
 }
 
 void
@@ -170,7 +170,7 @@ fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
               this->fi->name, this, millisec);
 #endif
 
-       setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+       timer_setup(&this->tl, fsm_expire_timer, 0);
        this->expire_event = event;
        this->event_arg = arg;
        this->tl.expires = jiffies + (millisec * HZ) / 1000;
@@ -189,7 +189,7 @@ fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
 #endif
 
        del_timer(&this->tl);
-       setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+       timer_setup(&this->tl, fsm_expire_timer, 0);
        this->expire_event = event;
        this->event_arg = arg;
        this->tl.expires = jiffies + (millisec * HZ) / 1000;
index e131a03262ad7bcb3041a9e1e98da574930c727b..92ae84a927fcf391abebaccbd907d5d962ed9bed 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  *  Linux for S/390 Lan Channel Station Network Driver
  *
@@ -7,20 +8,6 @@
  *            Rewritten by
  *                     Frank Pavlic <fpavlic@de.ibm.com> and
  *                     Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define KMSG_COMPONENT         "lcs"
index b9c7c1e61da296f743f7bbd6f5d30e43d5940117..5ce2424ca7290397e43b55c66581071424da99b9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * IUCV network driver
  *
  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #define KMSG_COMPONENT "netiucv"
index 49b9efeba1bda1e2390289b8ba536fa7bad0542c..98a7f84540ab2c51e483b7a4d9368170640fef77 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
index b22ed2a57acd94661c97b77966246f0785c77ae2..ae81534de91228910fd877fce0e1e262cc24fddf 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
index d2537c09126d676a77eee6d2bbacb54030c2f657..93d7e345d18043e18fe75c2b87bd4ff1bddfce32 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
index aadd384316a375f15506cada437a818f4a621239..0f8c12738b067d94184862777b3f2d7e2594a846 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    Copyright IBM Corp. 2007, 2009
  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
index a851d34c642b5d26866fafdde925eb48ddf61003..3b0c8b8a7634d18df62ece8f94936ed39666a2af 100644 (file)
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * IUCV special message driver
  *
  * Copyright IBM Corp. 2003, 2009
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
index 32515a201bbc65c4a0c6e9d48282a188ff16cdda..0a263999f7ae44b181ac7dc786908698c42a9974 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Deliver z/VM CP special messages (SMSG) as uevents.
  *
index 84752152d41fd682c5ae350ddb4bd3ac80d47cde..a3a8c8d9d7171a8d6994548212084ff3380ba493 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * zfcp device driver
  *
index 51b81c0a06520bfaa55446aa443b3d394f4c120e..b12cb81ad8a23a84beac4f3455f9234b3dd7e0a9 100644 (file)
@@ -34,7 +34,7 @@ static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
                                 unsigned long timeout)
 {
-       fsf_req->timer.function = (TIMER_FUNC_TYPE)zfcp_fsf_request_timeout_handler;
+       fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
        fsf_req->timer.expires = jiffies + timeout;
        add_timer(&fsf_req->timer);
 }
@@ -42,7 +42,7 @@ static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
 {
        BUG_ON(!fsf_req->erp_action);
-       fsf_req->timer.function = (TIMER_FUNC_TYPE)zfcp_erp_timeout_handler;
+       fsf_req->timer.function = zfcp_erp_timeout_handler;
        fsf_req->timer.expires = jiffies + 30 * HZ;
        add_timer(&fsf_req->timer);
 }
index f68af1f317f15460d489c9b8324ebc4d06142ca9..2dc4d9aab634592363138cb69a07f6885fb25438 100644 (file)
@@ -1,9 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
 # Makefile for kvm guest drivers on s390
 #
 # Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
 
 obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
index b18fe2014cf2195a193186c08c956dc8e5cfe7e3..ba2e0856d22cdfb5396457366276e01bc9ac7851 100644 (file)
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * ccw based virtio transport
  *
  * Copyright IBM Corp. 2012, 2014
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
 
index f32765d3cbd89dd3cd50186f803b74a44ea0973f..5c8ed7350a04a4f65be2eefa103637f2943e5a37 100644 (file)
@@ -22,7 +22,6 @@
 
 #include <asm/display7seg.h>
 
-#define D7S_MINOR      193
 #define DRIVER_NAME    "d7s"
 #define PFX            DRIVER_NAME ": "
 
index 5402b85b0bdc397361e4d9ba965f995675b81eeb..2dbc8330d7d34b4e9904e88f6ae79778c50cd07c 100644 (file)
@@ -1175,7 +1175,7 @@ static void asd_start_scb_timers(struct list_head *list)
        struct asd_ascb *ascb;
        list_for_each_entry(ascb, list, list) {
                if (!ascb->uldd_timer) {
-                       ascb->timer.function = (TIMER_FUNC_TYPE)asd_ascb_timedout;
+                       ascb->timer.function = asd_ascb_timedout;
                        ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
                        add_timer(&ascb->timer);
                }
index 4637119c09d8aad052c566cf6e9fb2d16c7e87f3..2a01702d5ba77ffcdee48e56d47c71646fd2f8fe 100644 (file)
@@ -42,7 +42,7 @@ static int asd_enqueue_internal(struct asd_ascb *ascb,
        ascb->tasklet_complete = tasklet_complete;
        ascb->uldd_timer = 1;
 
-       ascb->timer.function = (TIMER_FUNC_TYPE)timed_out;
+       ascb->timer.function = timed_out;
        ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
 
        add_timer(&ascb->timer);
index af032c46ec0e1950deecbc69a33c24035bb4ecae..21f6421536a05f5c20c72277733bc9aecd6b5b4e 100644 (file)
@@ -101,7 +101,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
-static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_request_device_map(struct timer_list *t);
 static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
 static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
 static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
@@ -837,10 +837,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        atomic_set(&acb->rq_map_token, 16);
        atomic_set(&acb->ante_token_value, 16);
        acb->fw_flag = FW_NORMAL;
-       init_timer(&acb->eternal_timer);
+       timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
        acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-       acb->eternal_timer.data = (unsigned long) acb;
-       acb->eternal_timer.function = &arcmsr_request_device_map;
        add_timer(&acb->eternal_timer);
        if(arcmsr_alloc_sysfs_attr(acb))
                goto out_free_sysfs;
@@ -930,10 +928,8 @@ static int arcmsr_resume(struct pci_dev *pdev)
        atomic_set(&acb->rq_map_token, 16);
        atomic_set(&acb->ante_token_value, 16);
        acb->fw_flag = FW_NORMAL;
-       init_timer(&acb->eternal_timer);
+       timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
        acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-       acb->eternal_timer.data = (unsigned long) acb;
-       acb->eternal_timer.function = &arcmsr_request_device_map;
        add_timer(&acb->eternal_timer);
        return 0;
 controller_stop:
@@ -3459,9 +3455,9 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
        }
 }
 
-static void arcmsr_request_device_map(unsigned long pacb)
+static void arcmsr_request_device_map(struct timer_list *t)
 {
-       struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+       struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
        switch (acb->adapter_type) {
                case ACB_ADAPTER_TYPE_A: {
                        arcmsr_hbaA_request_device_map(acb);
index 24388795ee9a30ea933a3c11b60009fcff7b656f..f4775ca70babac8e1b9bec3dba86220ee59937fb 100644 (file)
@@ -2318,9 +2318,9 @@ DEF_SCSI_QCMD(fas216_noqueue_command)
  * Error handler timeout function.  Indicate that we timed out,
  * and wake up any error handler process so it can continue.
  */
-static void fas216_eh_timer(unsigned long data)
+static void fas216_eh_timer(struct timer_list *t)
 {
-       FAS216_Info *info = (FAS216_Info *)data;
+       FAS216_Info *info = from_timer(info, t, eh_timer);
 
        fas216_log(info, LOG_ERROR, "error handling timed out\n");
 
@@ -2849,9 +2849,7 @@ int fas216_init(struct Scsi_Host *host)
        info->rst_dev_status = -1;
        info->rst_bus_status = -1;
        init_waitqueue_head(&info->eh_wait);
-       init_timer(&info->eh_timer);
-       info->eh_timer.data  = (unsigned long)info;
-       info->eh_timer.function = fas216_eh_timer;
+       timer_setup(&info->eh_timer, fas216_eh_timer, 0);
        
        spin_lock_init(&info->host_lock);
 
index be96aa1e507722da1665f967027b0fb5a16e48e4..b3cfdd5f4d1c3812baf40f34c936b53f3b34acdf 100644 (file)
@@ -5279,7 +5279,7 @@ static void beiscsi_hw_health_check(struct timer_list *t)
                if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
                        return;
                /* modify this timer to check TPE */
-               phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_tpe_check;
+               phba->hw_check.function = beiscsi_hw_tpe_check;
        }
 
        mod_timer(&phba->hw_check,
@@ -5367,7 +5367,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
         * Timer function gets modified for TPE detection.
         * Always reinit to do health check first.
         */
-       phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_health_check;
+       phba->hw_check.function = beiscsi_hw_health_check;
        mod_timer(&phba->hw_check,
                  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
        return 0;
index 5caf5f3ff642282ee13776e9df9ca9a18f494536..cf04666868045d6e632dfda9a6fff39c4e8d88b1 100644 (file)
@@ -692,9 +692,9 @@ ext:
 }
 
 void
-bfad_bfa_tmo(unsigned long data)
+bfad_bfa_tmo(struct timer_list *t)
 {
-       struct bfad_s         *bfad = (struct bfad_s *) data;
+       struct bfad_s         *bfad = from_timer(bfad, t, hal_tmo);
        unsigned long   flags;
        struct list_head               doneq;
 
@@ -719,9 +719,7 @@ bfad_bfa_tmo(unsigned long data)
 void
 bfad_init_timer(struct bfad_s *bfad)
 {
-       init_timer(&bfad->hal_tmo);
-       bfad->hal_tmo.function = bfad_bfa_tmo;
-       bfad->hal_tmo.data = (unsigned long)bfad;
+       timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
 
        mod_timer(&bfad->hal_tmo,
                  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
index cfcfff48e8e16e3fb2b66c0d491c0d612a8cdd38..4fe980a6441f5bbed6bda0892237a8c342653460 100644 (file)
@@ -314,7 +314,7 @@ int         bfad_setup_intr(struct bfad_s *bfad);
 void           bfad_remove_intr(struct bfad_s *bfad);
 void           bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
 bfa_status_t   bfad_hal_mem_alloc(struct bfad_s *bfad);
-void           bfad_bfa_tmo(unsigned long data);
+void           bfad_bfa_tmo(struct timer_list *t);
 void           bfad_init_timer(struct bfad_s *bfad);
 int            bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
 void           bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
index 5b6153f23f01a88d0b93118e889223cf26c93e19..8e2f767147cb43de2acdcabeab820537fc3422d3 100644 (file)
@@ -1084,24 +1084,35 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
 {
        struct bnx2fc_rport *tgt = io_req->tgt;
        int rc = SUCCESS;
+       unsigned int time_left;
 
        io_req->wait_for_comp = 1;
        bnx2fc_initiate_cleanup(io_req);
 
        spin_unlock_bh(&tgt->tgt_lock);
 
-       wait_for_completion(&io_req->tm_done);
-
+       /*
+        * Can't wait forever on cleanup response lest we let the SCSI error
+        * handler wait forever
+        */
+       time_left = wait_for_completion_timeout(&io_req->tm_done,
+                                               BNX2FC_FW_TIMEOUT);
        io_req->wait_for_comp = 0;
+       if (!time_left)
+               BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
+                             __func__);
+
        /*
-        * release the reference taken in eh_abort to allow the
-        * target to re-login after flushing IOs
+        * Release the reference held by the SCSI command; the cleanup
+        * completion hits the BNX2FC_CLEANUP case in bnx2fc_process_cq_compl()
+        * and thus the SCSI command is not returned by bnx2fc_scsi_done().
         */
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
 
        spin_lock_bh(&tgt->tgt_lock);
        return rc;
 }
+
 /**
  * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
  *                     SCSI command
@@ -1118,6 +1129,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        struct fc_lport *lport;
        struct bnx2fc_rport *tgt;
        int rc;
+       unsigned int time_left;
 
        rc = fc_block_scsi_eh(sc_cmd);
        if (rc)
@@ -1194,6 +1206,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                if (cancel_delayed_work(&io_req->timeout_work))
                        kref_put(&io_req->refcount,
                                 bnx2fc_cmd_release); /* drop timer hold */
+               /*
+                * We don't want to hold off the upper layer timer so simply
+                * cleanup the command and return that I/O was successfully
+                * aborted.
+                */
                rc = bnx2fc_abts_cleanup(io_req);
                /* This only occurs when a task abort was requested while ABTS
                   is in progress.  Setting the IO_CLEANUP flag will skip the
@@ -1201,7 +1218,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                   was a result from the ABTS request rather than the CLEANUP
                   request */
                set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
-               goto out;
+               goto done;
        }
 
        /* Cancel the current timer running on this io_req */
@@ -1221,7 +1238,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
        }
        spin_unlock_bh(&tgt->tgt_lock);
 
-       wait_for_completion(&io_req->tm_done);
+       /* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
+       time_left = wait_for_completion_timeout(&io_req->tm_done,
+           (2 * rp->r_a_tov + 1) * HZ);
+       if (!time_left)
+               BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done");
 
        spin_lock_bh(&tgt->tgt_lock);
        io_req->wait_for_comp = 0;
@@ -1233,8 +1254,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                /* Let the scsi-ml try to recover this command */
                printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
                       io_req->xid);
+               /*
+                * Cleanup firmware residuals before returning control back
+                * to SCSI ML.
+                */
                rc = bnx2fc_abts_cleanup(io_req);
-               goto out;
+               goto done;
        } else {
                /*
                 * We come here even when there was a race condition
@@ -1249,7 +1274,6 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 done:
        /* release the reference taken in eh_abort */
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
-out:
        spin_unlock_bh(&tgt->tgt_lock);
        return rc;
 }
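The bnx2fc hunks above replace unbounded wait_for_completion() calls with wait_for_completion_timeout(), which returns 0 if the timeout expired and the remaining jiffies otherwise, so the SCSI error handler can give up instead of hanging forever. A minimal sketch of that bounded wait, with a hypothetical completion and timeout value (not the driver's actual ones):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_for_cleanup(struct completion *done)
{
	unsigned long time_left;

	/* Returns 0 when the wait timed out before complete() was called. */
	time_left = wait_for_completion_timeout(done, msecs_to_jiffies(10000));
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}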
index 59a2dfbcbc6991efb8ff2860b17cef4e198a7b46..a8ae1a019eea55eaef72823d9ed58eb376a11976 100644 (file)
@@ -14,8 +14,8 @@
  */
 
 #include "bnx2fc.h"
-static void bnx2fc_upld_timer(unsigned long data);
-static void bnx2fc_ofld_timer(unsigned long data);
+static void bnx2fc_upld_timer(struct timer_list *t);
+static void bnx2fc_ofld_timer(struct timer_list *t);
 static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
                           struct fcoe_port *port,
                           struct fc_rport_priv *rdata);
@@ -27,10 +27,10 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
                              struct bnx2fc_rport *tgt);
 static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
 
-static void bnx2fc_upld_timer(unsigned long data)
+static void bnx2fc_upld_timer(struct timer_list *t)
 {
 
-       struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+       struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
 
        BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
        /* fake upload completion */
@@ -40,10 +40,10 @@ static void bnx2fc_upld_timer(unsigned long data)
        wake_up_interruptible(&tgt->upld_wait);
 }
 
-static void bnx2fc_ofld_timer(unsigned long data)
+static void bnx2fc_ofld_timer(struct timer_list *t)
 {
 
-       struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+       struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
 
        BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
        /* NOTE: This function should never be called, as
@@ -65,7 +65,7 @@ static void bnx2fc_ofld_timer(unsigned long data)
 
 static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
 {
-       setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+       timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
        mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
 
        wait_event_interruptible(tgt->ofld_wait,
@@ -277,7 +277,7 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
 
 static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
 {
-       setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+       timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
        mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
        wait_event_interruptible(tgt->upld_wait,
                                 (test_bit(
index babd79361a461097f93dedba4fb400d8caecf390..bf07735275a49d7720e98b4af5c4ef7c7693f72b 100644 (file)
@@ -586,8 +586,8 @@ static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        if (rpl->status == CPL_ERR_CONN_EXIST &&
-           csk->retry_timer.function != (TIMER_FUNC_TYPE)act_open_retry_timer) {
-               csk->retry_timer.function = (TIMER_FUNC_TYPE)act_open_retry_timer;
+           csk->retry_timer.function != act_open_retry_timer) {
+               csk->retry_timer.function = act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
index 266eddf17a991b207f07f67d2fdc0877d63dfb1f..406e94312d4e9a49b0015e811f14b6a02b1177a8 100644 (file)
@@ -963,8 +963,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
        spin_lock_bh(&csk->lock);
 
        if (status == CPL_ERR_CONN_EXIST &&
-           csk->retry_timer.function != (TIMER_FUNC_TYPE)csk_act_open_retry_timer) {
-               csk->retry_timer.function = (TIMER_FUNC_TYPE)csk_act_open_retry_timer;
+           csk->retry_timer.function != csk_act_open_retry_timer) {
+               csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
index 81f226be3e3b72067b87fe6d2553f91e7783b62f..4eb14301a497bdd7371aed487ccee2f210ac3f81 100644 (file)
@@ -1631,23 +1631,21 @@ void esas2r_adapter_tasklet(unsigned long context)
        }
 }
 
-static void esas2r_timer_callback(unsigned long context);
+static void esas2r_timer_callback(struct timer_list *t);
 
 void esas2r_kickoff_timer(struct esas2r_adapter *a)
 {
-       init_timer(&a->timer);
+       timer_setup(&a->timer, esas2r_timer_callback, 0);
 
-       a->timer.function = esas2r_timer_callback;
-       a->timer.data = (unsigned long)a;
        a->timer.expires = jiffies +
                           msecs_to_jiffies(100);
 
        add_timer(&a->timer);
 }
 
-static void esas2r_timer_callback(unsigned long context)
+static void esas2r_timer_callback(struct timer_list *t)
 {
-       struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+       struct esas2r_adapter *a = from_timer(a, t, timer);
 
        set_bit(AF2_TIMER_TICK, &a->flags2);
 
index fff6f1851dc1e56779acff511c5b6c3625a9f51c..097f37de6ce91231082353f327485d8f189d68ba 100644 (file)
@@ -49,7 +49,7 @@
 #define        FCOE_CTLR_MIN_FKA       500             /* min keep alive (mS) */
 #define        FCOE_CTLR_DEF_FKA       FIP_DEF_FKA     /* default keep alive (mS) */
 
-static void fcoe_ctlr_timeout(unsigned long);
+static void fcoe_ctlr_timeout(struct timer_list *);
 static void fcoe_ctlr_timer_work(struct work_struct *);
 static void fcoe_ctlr_recv_work(struct work_struct *);
 static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
@@ -156,7 +156,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
        mutex_init(&fip->ctlr_mutex);
        spin_lock_init(&fip->ctlr_lock);
        fip->flogi_oxid = FC_XID_UNKNOWN;
-       setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
+       timer_setup(&fip->timer, fcoe_ctlr_timeout, 0);
        INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
        INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
        skb_queue_head_init(&fip->fip_recv_list);
@@ -1786,9 +1786,9 @@ unlock:
  * fcoe_ctlr_timeout() - FIP timeout handler
  * @arg: The FCoE controller that timed out
  */
-static void fcoe_ctlr_timeout(unsigned long arg)
+static void fcoe_ctlr_timeout(struct timer_list *t)
 {
-       struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
+       struct fcoe_ctlr *fip = from_timer(fip, t, timer);
 
        schedule_work(&fip->timer_work);
 }
index aacadbf20b6954c28990d5dd592182a95cbaeba4..e52599f441707adc00df3bbb8d972f2e3a9ffe96 100644 (file)
@@ -407,18 +407,18 @@ static int fnic_notify_set(struct fnic *fnic)
        return err;
 }
 
-static void fnic_notify_timer(unsigned long data)
+static void fnic_notify_timer(struct timer_list *t)
 {
-       struct fnic *fnic = (struct fnic *)data;
+       struct fnic *fnic = from_timer(fnic, t, notify_timer);
 
        fnic_handle_link_event(fnic);
        mod_timer(&fnic->notify_timer,
                  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
 }
 
-static void fnic_fip_notify_timer(unsigned long data)
+static void fnic_fip_notify_timer(struct timer_list *t)
 {
-       struct fnic *fnic = (struct fnic *)data;
+       struct fnic *fnic = from_timer(fnic, t, fip_timer);
 
        fnic_handle_fip_timer(fnic);
 }
@@ -777,8 +777,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
                fnic->set_vlan = fnic_set_vlan;
                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
-               setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
-                                                       (unsigned long)fnic);
+               timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
                spin_lock_init(&fnic->vlans_lock);
                INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
                INIT_WORK(&fnic->event_work, fnic_handle_event);
@@ -809,8 +808,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* Setup notify timer when using MSI interrupts */
        if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
-               setup_timer(&fnic->notify_timer,
-                           fnic_notify_timer, (unsigned long)fnic);
+               timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);
 
        /* allocate RQ buffers and post them to RQ*/
        for (i = 0; i < fnic->rq_count; i++) {
index 61a85ff8e459f429b7090cbaa77d4712a7e854c7..5f503cb095085d2baa7e2f24248419dbbb432e29 100644 (file)
@@ -839,7 +839,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                }
                task->task_done = hisi_sas_task_done;
 
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+               task->slow_task->timer.function = hisi_sas_tmf_timedout;
                task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);
 
@@ -1451,7 +1451,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
        task->dev = device;
        task->task_proto = device->tproto;
        task->task_done = hisi_sas_task_done;
-       task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+       task->slow_task->timer.function = hisi_sas_tmf_timedout;
        task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
        add_timer(&task->slow_task->timer);
 
index d02c2a791981f9a68893efcfc9811666e6494c74..5d3467fd728d9f583491617e9e513266100a48b9 100644 (file)
@@ -1268,7 +1268,7 @@ static void link_timeout_enable_link(struct timer_list *t)
                }
        }
 
-       hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+       hisi_hba->timer.function = link_timeout_disable_link;
        mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
 }
 
@@ -1289,13 +1289,13 @@ static void link_timeout_disable_link(struct timer_list *t)
                }
        }
 
-       hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_enable_link;
+       hisi_hba->timer.function = link_timeout_enable_link;
        mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
 }
 
 static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
 {
-       hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+       hisi_hba->timer.function = link_timeout_disable_link;
        hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
        add_timer(&hisi_hba->timer);
 }
index d53429371127a4eb3ea76ce52dc2408ec05b9a11..cc0187965eee95fe242ab853469ed21f92546d66 100644 (file)
@@ -997,7 +997,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
        ipr_cmd->done = done;
 
        ipr_cmd->timer.expires = jiffies + timeout;
-       ipr_cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+       ipr_cmd->timer.function = timeout_func;
 
        add_timer(&ipr_cmd->timer);
 
@@ -8312,7 +8312,7 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
        ipr_cmd->done = ipr_reset_ioa_job;
 
        ipr_cmd->timer.expires = jiffies + timeout;
-       ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_reset_timer_done;
+       ipr_cmd->timer.function = ipr_reset_timer_done;
        add_timer(&ipr_cmd->timer);
 }
 
@@ -8397,7 +8397,7 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
        }
 
        ipr_cmd->timer.expires = jiffies + stage_time * HZ;
-       ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+       ipr_cmd->timer.function = ipr_oper_timeout;
        ipr_cmd->done = ipr_reset_ioa_job;
        add_timer(&ipr_cmd->timer);
 
@@ -8468,7 +8468,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
        }
 
        ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
-       ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+       ipr_cmd->timer.function = ipr_oper_timeout;
        ipr_cmd->done = ipr_reset_ioa_job;
        add_timer(&ipr_cmd->timer);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
index 1a4e701a844966e4a7db873a3c2dfe3a58768fc2..4fae253d4f3ded0a9453c01383f20a44450122ae 100644 (file)
@@ -1214,7 +1214,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
        fsp->seq_ptr = seq;
        fc_fcp_pkt_hold(fsp);   /* hold for fc_fcp_pkt_destroy */
 
-       fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+       fsp->timer.function = fc_fcp_timeout;
        if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
                fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 
@@ -1307,7 +1307,7 @@ static void fc_lun_reset_send(struct timer_list *t)
                        return;
                if (fc_fcp_lock_pkt(fsp))
                        return;
-               fsp->timer.function = (TIMER_FUNC_TYPE)fc_lun_reset_send;
+               fsp->timer.function = fc_lun_reset_send;
                fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
                fc_fcp_unlock_pkt(fsp);
        }
@@ -1445,7 +1445,7 @@ static void fc_fcp_timeout(struct timer_list *t)
        if (fsp->lp->qfull) {
                FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
                           fsp->timer_delay);
-               fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+               fsp->timer.function = fc_fcp_timeout;
                fc_fcp_timer_set(fsp, fsp->timer_delay);
                goto unlock;
        }
index 174e5eff615579d3c2822692de37e59a0b219b03..ca1566237ae7744703a9e3ae3533138c4e81af53 100644 (file)
@@ -92,7 +92,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
 
                task->task_done = smp_task_done;
 
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)smp_task_timedout;
+               task->slow_task->timer.function = smp_task_timedout;
                task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);
 
index 91795eb56206603bb0a52baa62237e9787dd2bcf..58476b728c57e11cd20232f1b43215eb3b2c1e5b 100644 (file)
@@ -919,7 +919,7 @@ void sas_task_abort(struct sas_task *task)
                        return;
                if (!del_timer(&slow->timer))
                        return;
-               slow->timer.function((TIMER_DATA_TYPE)&slow->timer);
+               slow->timer.function(&slow->timer);
                return;
        }
 
index cff1c37b8d2e46374ffda812b97877851b1cea2c..cff43bd9f6751ae1197b76c4d9eb22bc92ef2784 100644 (file)
@@ -1310,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
                memcpy(&task->ssp_task, parameter, para_len);
                task->task_done = mvs_task_done;
 
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)mvs_tmf_timedout;
+               task->slow_task->timer.function = mvs_tmf_timedout;
                task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);
 
@@ -2020,7 +2020,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
                MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
                                        tmp | PHYEV_SIG_FIS);
                if (phy->timer.function == NULL) {
-                       phy->timer.function = (TIMER_FUNC_TYPE)mvs_sig_time_out;
+                       phy->timer.function = mvs_sig_time_out;
                        phy->timer.expires = jiffies + 5*HZ;
                        add_timer(&phy->timer);
                }
index 5b93ed810f6ef099e265a5e45305ef26224ee4bd..dc4e801b2cefef35822ea70cace8b6cdf806f33d 100644 (file)
@@ -8093,9 +8093,9 @@ irqreturn_t ncr53c8xx_intr(int irq, void *dev_id)
      return IRQ_HANDLED;
 }
 
-static void ncr53c8xx_timeout(unsigned long npref)
+static void ncr53c8xx_timeout(struct timer_list *t)
 {
-       struct ncb *np = (struct ncb *) npref;
+       struct ncb *np = from_timer(np, t, timer);
        unsigned long flags;
        struct scsi_cmnd *done_list;
 
@@ -8357,9 +8357,7 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
        if (!np->scripth0)
                goto attach_error;
 
-       init_timer(&np->timer);
-       np->timer.data     = (unsigned long) np;
-       np->timer.function = ncr53c8xx_timeout;
+       timer_setup(&np->timer, ncr53c8xx_timeout, 0);
 
        /* Try to map the controller chip to virtual and physical memory. */
 
index 0e294e80c1690f20941fd26a506d3273fe341853..947d6017d004c83b3e758392d6278524613a8621 100644 (file)
@@ -695,7 +695,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
                task->task_proto = dev->tproto;
                memcpy(&task->ssp_task, parameter, para_len);
                task->task_done = pm8001_task_done;
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+               task->slow_task->timer.function = pm8001_tmf_timedout;
                task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
                add_timer(&task->slow_task->timer);
 
@@ -781,7 +781,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
                task->dev = dev;
                task->task_proto = dev->tproto;
                task->task_done = pm8001_task_done;
-               task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+               task->slow_task->timer.function = pm8001_tmf_timedout;
                task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
                add_timer(&task->slow_task->timer);
 
index 4f9f115fb6a0c8c9a3d3e5753d5d236c52897e60..e58be98430b014a40e1cd5eeba5472a39fe7419f 100644 (file)
@@ -604,7 +604,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
 
        cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
        cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
-       cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_bist_done;
+       cmd->timer.function = pmcraid_bist_done;
        add_timer(&cmd->timer);
 }
 
@@ -636,7 +636,7 @@ static void pmcraid_reset_alert_done(struct timer_list *t)
                /* restart timer if some more time is available to wait */
                cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
                cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
-               cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+               cmd->timer.function = pmcraid_reset_alert_done;
                add_timer(&cmd->timer);
        }
 }
@@ -673,7 +673,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
                 */
                cmd->time_left = PMCRAID_RESET_TIMEOUT;
                cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
-               cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+               cmd->timer.function = pmcraid_reset_alert_done;
                add_timer(&cmd->timer);
 
                iowrite32(DOORBELL_IOA_RESET_ALERT,
@@ -923,7 +923,7 @@ static void pmcraid_send_cmd(
        if (timeout_func) {
                /* setup timeout handler */
                cmd->timer.expires = jiffies + timeout;
-               cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+               cmd->timer.function = timeout_func;
                add_timer(&cmd->timer);
        }
 
@@ -1951,7 +1951,7 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
        cmd->cmd_done = pmcraid_ioa_reset;
        cmd->timer.expires = jiffies +
                             msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
-       cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_timeout_handler;
+       cmd->timer.function = pmcraid_timeout_handler;
 
        if (!timer_pending(&cmd->timer))
                add_timer(&cmd->timer);
index fe5a9ea27b5eb4ccb637f275ca8d013cff662f64..78d4aa8df675a1671df5daf12dfc7d9000f3cbe5 100644 (file)
@@ -22,7 +22,7 @@ struct scsi_dev_info_list {
        struct list_head dev_info_list;
        char vendor[8];
        char model[16];
-       unsigned flags;
+       blist_flags_t flags;
        unsigned compatible; /* for use with scsi_static_device_list entries */
 };
 
@@ -35,7 +35,7 @@ struct scsi_dev_info_list_table {
 
 
 static const char spaces[] = "                "; /* 16 of them */
-static unsigned scsi_default_dev_flags;
+static blist_flags_t scsi_default_dev_flags;
 static LIST_HEAD(scsi_dev_info_list);
 static char scsi_dev_flags[256];
 
@@ -52,7 +52,7 @@ static struct {
        char *vendor;
        char *model;
        char *revision; /* revision known to be bad, unused */
-       unsigned flags;
+       blist_flags_t flags;
 } scsi_static_device_list[] __initdata = {
        /*
         * The following devices are known not to tolerate a lun != 0 scan
@@ -335,7 +335,7 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
  * Returns: 0 OK, -error on failure.
  **/
 static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
-                           char *strflags, int flags)
+                           char *strflags, blist_flags_t flags)
 {
        return scsi_dev_info_list_add_keyed(compatible, vendor, model,
                                            strflags, flags,
@@ -361,7 +361,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
  * Returns: 0 OK, -error on failure.
  **/
 int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
-                                char *strflags, int flags, int key)
+                                char *strflags, blist_flags_t flags, int key)
 {
        struct scsi_dev_info_list *devinfo;
        struct scsi_dev_info_list_table *devinfo_table =
@@ -571,9 +571,9 @@ static int scsi_dev_info_list_add_str(char *dev_list)
  *     matching flags value, else return the host or global default
  *     settings.  Called during scan time.
  **/
-int scsi_get_device_flags(struct scsi_device *sdev,
-                         const unsigned char *vendor,
-                         const unsigned char *model)
+blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+                                   const unsigned char *vendor,
+                                   const unsigned char *model)
 {
        return scsi_get_device_flags_keyed(sdev, vendor, model,
                                           SCSI_DEVINFO_GLOBAL);
@@ -593,7 +593,7 @@ int scsi_get_device_flags(struct scsi_device *sdev,
  *     flags value, else return the host or global default settings.
  *     Called during scan time.
  **/
-int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
                                const unsigned char *vendor,
                                const unsigned char *model,
                                int key)
index df1368aea9a395a58361b604b6c17824b429a1ec..a5946cd64caa49cf91ac3571ab3e6355edef3604 100644 (file)
@@ -50,15 +50,16 @@ enum {
        SCSI_DEVINFO_SPI,
 };
 
-extern int scsi_get_device_flags(struct scsi_device *sdev,
-                                const unsigned char *vendor,
-                                const unsigned char *model);
-extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
-                                      const unsigned char *vendor,
-                                      const unsigned char *model, int key);
+extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+                                          const unsigned char *vendor,
+                                          const unsigned char *model);
+extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
+                                                const unsigned char *vendor,
+                                                const unsigned char *model,
+                                                int key);
 extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
                                        char *model, char *strflags,
-                                       int flags, int key);
+                                       blist_flags_t flags, int key);
 extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
 extern int scsi_dev_info_add_list(int key, const char *name);
 extern int scsi_dev_info_remove_list(int key);
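The scsi_devinfo changes above switch the blacklist flags from plain int/unsigned to a dedicated blist_flags_t so sparse can flag accidental mixing with unrelated integers. The typedef itself lives in include/scsi/scsi_devinfo.h and is not part of these hunks; the sketch below only illustrates the general __bitwise typedef idiom with hypothetical type and flag names, the same trick used for gfp_t:

#include <linux/types.h>

typedef unsigned int __bitwise my_flags_t;

#define MY_FLAG_NOLUN		((__force my_flags_t)(1 << 0))
#define MY_FLAG_SPARSELUN	((__force my_flags_t)(1 << 1))

static my_flags_t default_flags;

static void set_default_flags(void)
{
	/* sparse warns if a bare integer is assigned to a __bitwise type */
	default_flags = MY_FLAG_NOLUN | MY_FLAG_SPARSELUN;
}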
index a0f2a20ea9e969b81362abbaea2e63c8a10ef3c6..be5e919db0e8cd9e713727a91bc46923673ea556 100644 (file)
@@ -566,7 +566,7 @@ EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
  *     are copied to the scsi_device any flags value is stored in *@bflags.
  **/
 static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
-                         int result_len, int *bflags)
+                         int result_len, blist_flags_t *bflags)
 {
        unsigned char scsi_cmd[MAX_COMMAND_SIZE];
        int first_inquiry_len, try_inquiry_len, next_inquiry_len;
index d32e3ba8863e86ef9e6ca55c5311697683050c16..791a2182de53592ddc70fe1661a2969a1de5713c 100644 (file)
@@ -565,9 +565,9 @@ static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
 /*
  *  Linux entry point of the timer handler
  */
-static void sym53c8xx_timer(unsigned long npref)
+static void sym53c8xx_timer(struct timer_list *t)
 {
-       struct sym_hcb *np = (struct sym_hcb *)npref;
+       struct sym_hcb *np = from_timer(np, t, s.timer);
        unsigned long flags;
 
        spin_lock_irqsave(np->s.host->host_lock, flags);
@@ -1351,9 +1351,7 @@ static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
        /*
         *  Start the timer daemon
         */
-       init_timer(&np->s.timer);
-       np->s.timer.data     = (unsigned long) np;
-       np->s.timer.function = sym53c8xx_timer;
+       timer_setup(&np->s.timer, sym53c8xx_timer, 0);
        np->s.lasttime=0;
        sym_timer (np);
 
index 609332b3e15b5415b589eb099620d4961f189dc0..c462b1c046cd4709bdf5b4b6d93081966c197490 100644 (file)
@@ -293,9 +293,9 @@ static void gb_operation_work(struct work_struct *work)
        gb_operation_put(operation);
 }
 
-static void gb_operation_timeout(unsigned long arg)
+static void gb_operation_timeout(struct timer_list *t)
 {
-       struct gb_operation *operation = (void *)arg;
+       struct gb_operation *operation = from_timer(operation, t, timer);
 
        if (gb_operation_result_set(operation, -ETIMEDOUT)) {
                /*
@@ -540,8 +540,7 @@ gb_operation_create_common(struct gb_connection *connection, u8 type,
                        goto err_request;
                }
 
-               setup_timer(&operation->timer, gb_operation_timeout,
-                           (unsigned long)operation);
+               timer_setup(&operation->timer, gb_operation_timeout, 0);
        }
 
        operation->flags = op_flags;
index a6635f0afae9269501ff7ceee11b8459a8adf61d..6dab15f5dae1b99b33559e9914400d015cef5baf 100644 (file)
@@ -75,7 +75,7 @@ struct lap_cb;
 static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
                                    void (*callback)(struct timer_list *))
 {
-       ptimer->function = (TIMER_FUNC_TYPE) callback;
+       ptimer->function = callback;
 
        /* Set new value for timer (update or add timer).
         * We use mod_timer() because it's more efficient and also
index 3c83aa31e2c208426e3396ba9d02a2ac3f273f3f..5a5d1811ffbeac436dd02fd8fc5700569ae5ef25 100644 (file)
@@ -700,9 +700,9 @@ lnet_delay_rule_daemon(void *arg)
 }
 
 static void
-delay_timer_cb(unsigned long arg)
+delay_timer_cb(struct timer_list *t)
 {
-       struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+       struct lnet_delay_rule *rule = from_timer(rule, t, dl_timer);
 
        spin_lock_bh(&delay_dd.dd_lock);
        if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
@@ -762,7 +762,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
                wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
        }
 
-       setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
+       timer_setup(&rule->dl_timer, delay_timer_cb, 0);
 
        spin_lock_init(&rule->dl_lock);
        INIT_LIST_HEAD(&rule->dl_msg_list);
index 2d6e64dea2660228855afd4ad0491c4a63e01b91..938b859b6650b2e4b48ebad34d75081a3595bd2e 100644 (file)
@@ -1016,7 +1016,7 @@ static bool file_is_noatime(const struct file *file)
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return true;
 
-       if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+       if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
                return true;
 
        return false;
index 65ac5128f0057468c8bada13282313005a566794..8666f1e81ade7ad24e2e60760ac03aa4c1d1dad0 100644 (file)
@@ -313,11 +313,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        }
 
        if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
                sbi->ll_flags |= LL_SBI_ACL;
        } else {
                LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
-               sb->s_flags &= ~MS_POSIXACL;
+               sb->s_flags &= ~SB_POSIXACL;
                sbi->ll_flags &= ~LL_SBI_ACL;
        }
 
@@ -660,7 +660,7 @@ void ll_kill_super(struct super_block *sb)
        struct ll_sb_info *sbi;
 
        /* not init sb ?*/
-       if (!(sb->s_flags & MS_ACTIVE))
+       if (!(sb->s_flags & SB_ACTIVE))
                return;
 
        sbi = ll_s2sbi(sb);
@@ -2039,8 +2039,8 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
        int err;
        __u32 read_only;
 
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
-               read_only = *flags & MS_RDONLY;
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+               read_only = *flags & SB_RDONLY;
                err = obd_set_info_async(NULL, sbi->ll_md_exp,
                                         sizeof(KEY_READ_ONLY),
                                         KEY_READ_ONLY, sizeof(read_only),
@@ -2053,9 +2053,9 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
                }
 
                if (read_only)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                else
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
 
                if (sbi->ll_flags & LL_SBI_VERBOSE)
                        LCONSOLE_WARN("Remounted %s %s\n", profilenm,
index 23cdb7c4476c9480b37e30a1ea2549724b51d319..63be6e7273f3548343c4b30991172a152599e90d 100644 (file)
@@ -329,11 +329,11 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
        return -1;
 }
 
-static void ptlrpc_at_timer(unsigned long castmeharder)
+static void ptlrpc_at_timer(struct timer_list *t)
 {
        struct ptlrpc_service_part *svcpt;
 
-       svcpt = (struct ptlrpc_service_part *)castmeharder;
+       svcpt = from_timer(svcpt, t, scp_at_timer);
 
        svcpt->scp_at_check = 1;
        svcpt->scp_at_checktime = cfs_time_current();
@@ -506,8 +506,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        if (!array->paa_reqs_count)
                goto free_reqs_array;
 
-       setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
-                   (unsigned long)svcpt);
+       timer_setup(&svcpt->scp_at_timer, ptlrpc_at_timer, 0);
 
        /* At SOW, service time should be quick; 10s seems generous. If client
         * timeout is less than this, we'll be sending an early reply.
@@ -926,7 +925,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
        next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
                       at_early_margin);
        if (next <= 0) {
-               ptlrpc_at_timer((unsigned long)svcpt);
+               ptlrpc_at_timer(&svcpt->scp_at_timer);
        } else {
                mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
                CDEBUG(D_INFO, "armed %s at %+ds\n",
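A few callers invoke the timer callback synchronously rather than arming the timer, for example sas_task_abort() above and ptlrpc_at_set_timer() here; after the conversion they simply pass the address of the embedded timer, and from_timer() in the callback still resolves the right container. A self-contained sketch of the idea (hypothetical names, as before):

#include <linux/timer.h>
#include <linux/types.h>

struct my_adapter {
	struct timer_list io_timer;
	bool timed_out;
};

static void my_adapter_timeout(struct timer_list *t)
{
	struct my_adapter *adap = from_timer(adap, t, io_timer);

	adap->timed_out = true;
}

/* Run the timeout handling immediately, bypassing the timer wheel. */
static void my_adapter_force_timeout(struct my_adapter *adap)
{
	my_adapter_timeout(&adap->io_timer);
}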
index 0790b3d9e25560366a48633ddce37beeea21e762..143038c6c403f0085fd7a8327a6682a67e559858 100644 (file)
@@ -293,9 +293,9 @@ static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
  * EOF timeout timer function. This is an unrecoverable condition
  * without a stream restart.
  */
-static void prp_eof_timeout(unsigned long data)
+static void prp_eof_timeout(struct timer_list *t)
 {
-       struct prp_priv *priv = (struct prp_priv *)data;
+       struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
        struct imx_media_video_dev *vdev = priv->vdev;
        struct imx_ic_priv *ic_priv = priv->ic_priv;
 
@@ -1292,8 +1292,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
        priv->ic_priv = ic_priv;
 
        spin_lock_init(&priv->irqlock);
-       setup_timer(&priv->eof_timeout_timer, prp_eof_timeout,
-                   (unsigned long)priv);
+       timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
 
        priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
                                                   PRPENCVF_SRC_PAD);
index 6d856118c223285702913468b29200e26c448a7c..bb1d6dafca83473eea250ff24be74b1505c9b533 100644 (file)
@@ -254,9 +254,9 @@ static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
  * EOF timeout timer function. This is an unrecoverable condition
  * without a stream restart.
  */
-static void csi_idmac_eof_timeout(unsigned long data)
+static void csi_idmac_eof_timeout(struct timer_list *t)
 {
-       struct csi_priv *priv = (struct csi_priv *)data;
+       struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
        struct imx_media_video_dev *vdev = priv->vdev;
 
        v4l2_err(&priv->sd, "EOF timeout\n");
@@ -1739,8 +1739,7 @@ static int imx_csi_probe(struct platform_device *pdev)
        priv->csi_id = pdata->csi;
        priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
 
-       setup_timer(&priv->eof_timeout_timer, csi_idmac_eof_timeout,
-                   (unsigned long)priv);
+       timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
        spin_lock_init(&priv->irqlock);
 
        v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
index 85775da293fb10d067d7956af1268eabd5727893..667dacac81f03ed7902807790f26f7316b08e34b 100644 (file)
@@ -744,9 +744,9 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
  * The handler runs in interrupt context. That's why we need to defer the
  * tasks to a work queue.
  */
-static void link_stat_timer_handler(unsigned long data)
+static void link_stat_timer_handler(struct timer_list *t)
 {
-       struct most_dev *mdev = (struct most_dev *)data;
+       struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
 
        schedule_work(&mdev->poll_work_obj);
        mdev->link_stat_timer.expires = jiffies + (2 * HZ);
@@ -1138,8 +1138,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
        num_endpoints = usb_iface_desc->desc.bNumEndpoints;
        mutex_init(&mdev->io_mutex);
        INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
-       setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
-                   (unsigned long)mdev);
+       timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
 
        mdev->usb_device = usb_dev;
        mdev->link_stat_timer.expires = jiffies + (2 * HZ);
index 4e7908322d77d1985989a0688096d975e0d41c6f..f56fdc7a4b614bd5732bfe9fac96017a43d49346 100644 (file)
@@ -391,10 +391,10 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee)
 }
 
 
-static void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(struct timer_list *t)
 {
        struct ieee80211_device *ieee =
-               (struct ieee80211_device *) _ieee;
+               from_timer(ieee, t, beacon_timer);
        unsigned long flags;
 
        spin_lock_irqsave(&ieee->beacon_lock, flags);
@@ -1251,9 +1251,11 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
        spin_unlock_irqrestore(&ieee->lock, flags);
 }
 
-static void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(struct timer_list *t)
 {
-       ieee80211_associate_abort((struct ieee80211_device *) dev);
+       struct ieee80211_device *dev = from_timer(dev, t, associate_timer);
+
+       ieee80211_associate_abort(dev);
 }
 
 
@@ -2718,11 +2720,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
        ieee->enable_rx_imm_BA = true;
        ieee->tx_pending.txb = NULL;
 
-       setup_timer(&ieee->associate_timer, ieee80211_associate_abort_cb,
-                   (unsigned long)ieee);
+       timer_setup(&ieee->associate_timer, ieee80211_associate_abort_cb, 0);
 
-       setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb,
-                   (unsigned long)ieee);
+       timer_setup(&ieee->beacon_timer, ieee80211_send_beacon_cb, 0);
 
 
        INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
index 576c15d25a0f80a53c0891f40331400d8f0b4c47..986a55bb9877e161d02209a04fb19e9eaea27dce 100644 (file)
@@ -138,17 +138,16 @@ _recv_indicatepkt_drop:
        precvpriv->rx_drop++;
 }
 
-static void _r8712_reordering_ctrl_timeout_handler (unsigned long data)
+static void _r8712_reordering_ctrl_timeout_handler (struct timer_list *t)
 {
        struct recv_reorder_ctrl *preorder_ctrl =
-                        (struct recv_reorder_ctrl *)data;
+                        from_timer(preorder_ctrl, t, reordering_ctrl_timer);
 
        r8712_reordering_ctrl_timeout_handler(preorder_ctrl);
 }
 
 void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
 {
-       setup_timer(&preorder_ctrl->reordering_ctrl_timer,
-                    _r8712_reordering_ctrl_timeout_handler,
-                    (unsigned long)preorder_ctrl);
+       timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+                   _r8712_reordering_ctrl_timeout_handler, 0);
 }
index da1d4a641dcd2cd33935532c470dc6db6a373777..455fba721135a8bad731abd399712144b84307da 100644 (file)
@@ -74,7 +74,7 @@ enum _LED_STATE_871x {
  *     Prototype of protected function.
  *===========================================================================
  */
-static void BlinkTimerCallback(unsigned long data);
+static void BlinkTimerCallback(struct timer_list *t);
 
 static void BlinkWorkItemCallback(struct work_struct *work);
 /*===========================================================================
@@ -99,8 +99,7 @@ static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
        pLed->bLedBlinkInProgress = false;
        pLed->BlinkTimes = 0;
        pLed->BlinkingLedState = LED_UNKNOWN;
-       setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
-                   (unsigned long)pLed);
+       timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
        INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
 }
 
@@ -825,9 +824,9 @@ static void SwLedBlink6(struct LED_871x *pLed)
  *             Callback function of LED BlinkTimer,
  *             it just schedules to corresponding BlinkWorkItem.
  */
-static void BlinkTimerCallback(unsigned long data)
+static void BlinkTimerCallback(struct timer_list *t)
 {
-       struct LED_871x  *pLed = (struct LED_871x *)data;
+       struct LED_871x  *pLed = from_timer(pLed, t, BlinkTimer);
 
        /* This fixed the crash problem on Fedora 12 when trying to do the
         * insmod;ifconfig up;rmmod commands.
index 16497202473fecde563f1da430e68b757a352a2f..aae868509e1302958bfcf13a029bd790f61692ec 100644 (file)
@@ -1164,7 +1164,7 @@ static void spkup_write(const u16 *in_buf, int count)
 static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1);
 
 static void read_all_doc(struct vc_data *vc);
-static void cursor_done(u_long data);
+static void cursor_done(struct timer_list *unused);
 static DEFINE_TIMER(cursor_timer, cursor_done);
 
 static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
@@ -1682,7 +1682,7 @@ static int speak_highlight(struct vc_data *vc)
        return 0;
 }
 
-static void cursor_done(u_long data)
+static void cursor_done(struct timer_list *unused)
 {
        struct vc_data *vc = vc_cons[cursor_con].d;
        unsigned long flags;
index 6ddd3fc3f08d15d5aff35e296ee13868eb1ecd74..aac29c816d09a54e58257abae1fb42624fb51d7b 100644 (file)
@@ -153,7 +153,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
 }
 EXPORT_SYMBOL_GPL(spk_synth_is_alive_restart);
 
-static void thread_wake_up(u_long data)
+static void thread_wake_up(struct timer_list *unused)
 {
        wake_up_interruptible_all(&speakup_event);
 }
index b604d0cccef12f2bf70b130c7f32d4d356fd3baf..6cb6eb0673c6da0a2f03473e02bde28196a74a9c 100644 (file)
@@ -493,9 +493,9 @@ static const struct file_operations bus_info_debugfs_fops = {
        .release = single_release,
 };
 
-static void dev_periodic_work(unsigned long __opaque)
+static void dev_periodic_work(struct timer_list *t)
 {
-       struct visor_device *dev = (struct visor_device *)__opaque;
+       struct visor_device *dev = from_timer(dev, t, timer);
        struct visor_driver *drv = to_visor_driver(dev->device.driver);
 
        drv->channel_interrupt(dev);
@@ -667,7 +667,7 @@ int create_visor_device(struct visor_device *dev)
        dev->device.release = visorbus_release_device;
        /* keep a reference just for us (now 2) */
        get_device(&dev->device);
-       setup_timer(&dev->timer, dev_periodic_work, (unsigned long)dev);
+       timer_setup(&dev->timer, dev_periodic_work, 0);
        /*
         * bus_id must be a unique name with respect to this bus TYPE (NOT bus
         * instance).  That's why we need to include the bus number within the
index 735d7e5fa86b0143a9d88e42fcb20f33048d49f1..6d8239163ba55452e0e7a4708b14bd7ebec5c785 100644 (file)
@@ -1766,9 +1766,10 @@ static int visornic_poll(struct napi_struct *napi, int budget)
  * Main function of the vnic_incoming thread. Periodically check the response
  * queue and drain it if needed.
  */
-static void poll_for_irq(unsigned long v)
+static void poll_for_irq(struct timer_list *t)
 {
-       struct visornic_devdata *devdata = (struct visornic_devdata *)v;
+       struct visornic_devdata *devdata = from_timer(devdata, t,
+                                                     irq_poll_timer);
 
        if (!visorchannel_signalempty(
                                   devdata->dev->visorchannel,
@@ -1899,8 +1900,7 @@ static int visornic_probe(struct visor_device *dev)
        /* Let's start our threads to get responses */
        netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
 
-       setup_timer(&devdata->irq_poll_timer, poll_for_irq,
-                   (unsigned long)devdata);
+       timer_setup(&devdata->irq_poll_timer, poll_for_irq, 0);
        /* Note: This timer has to start running before the while
         * loop below because the napi routine is responsible for
         * setting enab_dis_acked
index 8a275996d4e63a6eddccc6df30a152d72aab2813..028da1dc1b818380bd36c71b4cf403cce5523b45 100644 (file)
@@ -267,7 +267,7 @@ static void update_scan_time(void)
                last_scanned_shadow[i].time_scan = jiffies;
 }
 
-static void remove_network_from_shadow(unsigned long unused)
+static void remove_network_from_shadow(struct timer_list *unused)
 {
        unsigned long now = jiffies;
        int i, j;
@@ -292,7 +292,7 @@ static void remove_network_from_shadow(unsigned long unused)
        }
 }
 
-static void clear_duringIP(unsigned long arg)
+static void clear_duringIP(struct timer_list *unused)
 {
        wilc_optaining_ip = false;
 }
@@ -2278,8 +2278,8 @@ int wilc_init_host_int(struct net_device *net)
 
        priv = wdev_priv(net->ieee80211_ptr);
        if (op_ifcs == 0) {
-               setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
-               setup_timer(&wilc_during_ip_timer, clear_duringIP, 0);
+               timer_setup(&hAgingTimer, remove_network_from_shadow, 0);
+               timer_setup(&wilc_during_ip_timer, clear_duringIP, 0);
        }
        op_ifcs++;
 
index 90388698c222996d4d9db8901d876adb1cb0b700..417b9e66b0cd0b9017dfe67b3dd9bb60ae257e80 100644 (file)
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
        CSK_LOGIN_PDU_DONE,
        CSK_LOGIN_DONE,
        CSK_DDP_ENABLE,
+       CSK_ABORT_RPL_WAIT,
 };
 
 struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
 int cxgbit_setup_conn_digest(struct cxgbit_sock *);
 int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
 void cxgbit_free_conn(struct iscsi_conn *);
 extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
 int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
index d4fa41be80f9a1719574af28c8981ef8e8d287ca..92eb57e2adaf555fbb4f1938eda4bc78366eb505 100644 (file)
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+       __kfree_skb(skb);
+
+       if (csk->com.state != CSK_STATE_ESTABLISHED)
+               goto no_abort;
+
+       set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+       csk->com.state = CSK_STATE_ABORTING;
+
+       cxgbit_send_abort_req(csk);
+
+       return;
+
+no_abort:
+       cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+       cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+       struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+       cxgbit_get_csk(csk);
+       cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+       spin_lock_bh(&csk->lock);
+       if (csk->lock_owner) {
+               cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+               __skb_queue_tail(&csk->backlogq, skb);
+       } else {
+               __cxgbit_abort_conn(csk, skb);
+       }
+       spin_unlock_bh(&csk->lock);
+
+       cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+                             csk->tid, 600, __func__);
+}
+
 void cxgbit_free_conn(struct iscsi_conn *conn)
 {
        struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ rel_skb:
 
 static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
+       struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);
 
        switch (csk->com.state) {
        case CSK_STATE_ABORTING:
                csk->com.state = CSK_STATE_DEAD;
+               if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+                       cxgbit_wake_up(&csk->com.wr_wait, __func__,
+                                      rpl->status);
                cxgbit_put_csk(csk);
                break;
        default:
index 5fdb57cac96874c18a87a0ec9ecfb0449ecc6aae..768cce0ccb807518f32b3d72d875915e9391fdfd 100644 (file)
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                        struct cxgbit_device *cdev = csk->com.cdev;
                        struct cxgbi_ppm *ppm = cdev2ppm(cdev);
 
+                       /* Abort the TCP conn if DDP is not complete to
+                        * avoid any possibility of DDP after freeing
+                        * the cmd.
+                        */
+                       if (unlikely(cmd->write_data_done !=
+                                    cmd->se_cmd.data_length))
+                               cxgbit_abort_conn(csk);
+
                        cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
 
                        dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
index 4fd775ace541a978b258fc8f3b26f3c8108c23f3..f3f8856bfb68e8446ad19a0bf8156217f42cacab 100644 (file)
@@ -446,6 +446,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
        case CPL_RX_ISCSI_DDP:
        case CPL_FW4_ACK:
                lro_flush = false;
+               /* fall through */
        case CPL_ABORT_RPL_RSS:
        case CPL_PASS_ESTABLISH:
        case CPL_PEER_CLOSE:
index 9e67c7678c86d2af0aa4617a40686103efa55fd2..9eb10d34682cfb23dc65a00092f519d2f46657e4 100644 (file)
@@ -502,7 +502,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 EXPORT_SYMBOL(iscsit_aborted_task);
 
 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
-                                     u32, u32, u8 *, u8 *);
+                                     u32, u32, const void *, void *);
 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
 
 static int
@@ -523,7 +523,7 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
                iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                          ISCSI_HDR_LEN, 0, NULL,
-                                         (u8 *)header_digest);
+                                         header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -550,9 +550,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                if (conn->conn_ops->DataDigest) {
                        iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
                                                  data_buf, data_buf_len,
-                                                 padding,
-                                                 (u8 *)&cmd->pad_bytes,
-                                                 (u8 *)&cmd->data_crc);
+                                                 padding, &cmd->pad_bytes,
+                                                 &cmd->data_crc);
 
                        iov[niov].iov_base = &cmd->data_crc;
                        iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -597,7 +596,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
                iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
                                          ISCSI_HDR_LEN, 0, NULL,
-                                         (u8 *)header_digest);
+                                         header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -836,6 +835,7 @@ static int iscsit_add_reject_from_cmd(
        unsigned char *buf)
 {
        struct iscsi_conn *conn;
+       const bool do_put = cmd->se_cmd.se_tfo != NULL;
 
        if (!cmd->conn) {
                pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -866,7 +866,7 @@ static int iscsit_add_reject_from_cmd(
         * Perform the kref_put now if se_cmd has already been setup by
         * scsit_setup_scsi_cmd()
         */
-       if (cmd->se_cmd.se_tfo != NULL) {
+       if (do_put) {
                pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
                target_put_sess_cmd(&cmd->se_cmd);
        }
@@ -1410,13 +1410,9 @@ static u32 iscsit_do_crypto_hash_sg(
        return data_crc;
 }
 
-static void iscsit_do_crypto_hash_buf(
-       struct ahash_request *hash,
-       const void *buf,
-       u32 payload_length,
-       u32 padding,
-       u8 *pad_bytes,
-       u8 *data_crc)
+static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
+       const void *buf, u32 payload_length, u32 padding,
+       const void *pad_bytes, void *data_crc)
 {
        struct scatterlist sg[2];
 
@@ -1462,9 +1458,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
        iscsit_mod_dataout_timer(cmd);
 
        if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
-               pr_err("DataOut Offset: %u, Length %u greater than"
-                       " iSCSI Command EDTL %u, protocol error.\n",
-                       hdr->offset, payload_length, cmd->se_cmd.data_length);
+               pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+                      be32_to_cpu(hdr->offset), payload_length,
+                      cmd->se_cmd.data_length);
                return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
        }
 
@@ -1878,10 +1874,9 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                }
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-                                       ping_data, payload_length,
-                                       padding, cmd->pad_bytes,
-                                       (u8 *)&data_crc);
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
+                                                 payload_length, padding,
+                                                 cmd->pad_bytes, &data_crc);
 
                        if (checksum != data_crc) {
                                pr_err("Ping data CRC32C DataDigest"
@@ -1962,7 +1957,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        struct iscsi_tmr_req *tmr_req;
        struct iscsi_tm *hdr;
        int out_of_order_cmdsn = 0, ret;
-       bool sess_ref = false;
        u8 function, tcm_function = TMR_UNKNOWN;
 
        hdr                     = (struct iscsi_tm *) buf;
@@ -1995,22 +1989,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        cmd->data_direction = DMA_NONE;
        cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
-       if (!cmd->tmr_req)
+       if (!cmd->tmr_req) {
                return iscsit_add_reject_cmd(cmd,
                                             ISCSI_REASON_BOOKMARK_NO_RESOURCES,
                                             buf);
+       }
+
+       transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+                             conn->sess->se_sess, 0, DMA_NONE,
+                             TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+       target_get_sess_cmd(&cmd->se_cmd, true);
 
        /*
         * TASK_REASSIGN for ERL=2 / connection stays inside of
         * LIO-Target $FABRIC_MOD
         */
        if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-               transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
-                                     conn->sess->se_sess, 0, DMA_NONE,
-                                     TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
-               target_get_sess_cmd(&cmd->se_cmd, true);
-               sess_ref = true;
                tcm_function = iscsit_convert_tmf(function);
                if (tcm_function == TMR_UNKNOWN) {
                        pr_err("Unknown iSCSI TMR Function:"
@@ -2101,12 +2096,14 @@ attach:
 
        if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
                int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
-               if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+               if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
                        out_of_order_cmdsn = 1;
-               else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+               } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+                       target_put_sess_cmd(&cmd->se_cmd);
                        return 0;
-               else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+               } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
                        return -1;
+               }
        }
        iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
@@ -2126,12 +2123,8 @@ attach:
         * For connection recovery, this is also the default action for
         * TMR TASK_REASSIGN.
         */
-       if (sess_ref) {
-               pr_debug("Handle TMR, using sess_ref=true check\n");
-               target_put_sess_cmd(&cmd->se_cmd);
-       }
-
        iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+       target_put_sess_cmd(&cmd->se_cmd);
        return 0;
 }
 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
@@ -2287,10 +2280,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                        goto reject;
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-                                       text_in, payload_length,
-                                       padding, (u8 *)&pad_bytes,
-                                       (u8 *)&data_crc);
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
+                                                 payload_length, padding,
+                                                 &pad_bytes, &data_crc);
 
                        if (checksum != data_crc) {
                                pr_err("Text data CRC32C DataDigest"
@@ -3978,9 +3970,9 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                                return;
                        }
 
-                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
-                                       buffer, ISCSI_HDR_LEN,
-                                       0, NULL, (u8 *)&checksum);
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
+                                                 ISCSI_HDR_LEN, 0, NULL,
+                                                 &checksum);
 
                        if (digest != checksum) {
                                pr_err("HeaderDigest CRC32C failed,"
index 0dd4c45f7575a2795f3987dd874ac4891d93184f..0ebc4818e132ade606a77e8e46b46e183e111ddd 100644 (file)
@@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
 
        ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
        if (ret < 0)
-               return NULL;
+               goto free_out;
 
        ret = iscsit_tpg_add_portal_group(tiqn, tpg);
        if (ret != 0)
@@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
        return &tpg->tpg_se_tpg;
 out:
        core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
        kfree(tpg);
        return NULL;
 }
index 76184094a0cf944efc26c0aa32626b71e56b07b8..5efa42b939a104052f4fe0ea8ec94d09961cd8c0 100644 (file)
@@ -34,7 +34,7 @@
 #include "iscsi_target_erl2.h"
 #include "iscsi_target.h"
 
-#define OFFLOAD_BUF_SIZE       32768
+#define OFFLOAD_BUF_SIZE       32768U
 
 /*
  *     Used to dump excess datain payload for certain error recovery
@@ -56,7 +56,7 @@ int iscsit_dump_data_payload(
        if (conn->sess->sess_ops->RDMAExtensions)
                return 0;
 
-       length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+       length = min(buf_len, OFFLOAD_BUF_SIZE);
 
        buf = kzalloc(length, GFP_ATOMIC);
        if (!buf) {
@@ -67,8 +67,7 @@ int iscsit_dump_data_payload(
        memset(&iov, 0, sizeof(struct kvec));
 
        while (offset < buf_len) {
-               size = ((offset + length) > buf_len) ?
-                       (buf_len - offset) : length;
+               size = min(buf_len - offset, length);
 
                iov.iov_len = size;
                iov.iov_base = buf;
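The erl1 hunks above replace two open-coded ternaries with min(), and the U suffix added to OFFLOAD_BUF_SIZE is what lets that compile cleanly: the kernel's min() macro refuses mismatched argument types, and the lengths being clamped are unsigned while a bare 32768 is a signed int. A rough, self-contained illustration of the same point (not the driver code itself):

#include <linux/kernel.h>       /* min(), min_t() */

#define BUF_CAP         32768U  /* unsigned, so it matches a u32 length */

static u32 clamp_len(u32 buf_len)
{
        /*
         * min() type-checks both sides; with a plain "32768" (int) this
         * would warn.  min_t(u32, buf_len, 32768) is the other common
         * way to force a common type.
         */
        return min(buf_len, BUF_CAP);
}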
index caab1045742dfc659ac906abfffaa4c238f152e4..29a37b242d30a3f225f52ea9f34a00e412f02d95 100644 (file)
@@ -1380,10 +1380,8 @@ int iscsi_decode_text_input(
                char *key, *value;
                struct iscsi_param *param;
 
-               if (iscsi_extract_key_value(start, &key, &value) < 0) {
-                       kfree(tmpbuf);
-                       return -1;
-               }
+               if (iscsi_extract_key_value(start, &key, &value) < 0)
+                       goto free_buffer;
 
                pr_debug("Got key: %s=%s\n", key, value);
 
@@ -1396,38 +1394,37 @@ int iscsi_decode_text_input(
 
                param = iscsi_check_key(key, phase, sender, param_list);
                if (!param) {
-                       if (iscsi_add_notunderstood_response(key,
-                                       value, param_list) < 0) {
-                               kfree(tmpbuf);
-                               return -1;
-                       }
+                       if (iscsi_add_notunderstood_response(key, value,
+                                                            param_list) < 0)
+                               goto free_buffer;
+
                        start += strlen(key) + strlen(value) + 2;
                        continue;
                }
-               if (iscsi_check_value(param, value) < 0) {
-                       kfree(tmpbuf);
-                       return -1;
-               }
+               if (iscsi_check_value(param, value) < 0)
+                       goto free_buffer;
 
                start += strlen(key) + strlen(value) + 2;
 
                if (IS_PSTATE_PROPOSER(param)) {
-                       if (iscsi_check_proposer_state(param, value) < 0) {
-                               kfree(tmpbuf);
-                               return -1;
-                       }
+                       if (iscsi_check_proposer_state(param, value) < 0)
+                               goto free_buffer;
+
                        SET_PSTATE_RESPONSE_GOT(param);
                } else {
-                       if (iscsi_check_acceptor_state(param, value, conn) < 0) {
-                               kfree(tmpbuf);
-                               return -1;
-                       }
+                       if (iscsi_check_acceptor_state(param, value, conn) < 0)
+                               goto free_buffer;
+
                        SET_PSTATE_ACCEPTOR(param);
                }
        }
 
        kfree(tmpbuf);
        return 0;
+
+free_buffer:
+       kfree(tmpbuf);
+       return -1;
 }
 
 int iscsi_encode_text_output(
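The rewrite of iscsi_decode_text_input() above is purely structural: four duplicated kfree(tmpbuf); return -1; pairs collapse into a single free_buffer exit label, so the buffer is released on exactly one error path. A minimal sketch of that single-exit pattern, with hypothetical helpers (parse_entry(), apply_entry()) standing in for the real key/value handling:

#include <linux/slab.h>
#include <linux/errno.h>

int parse_entry(const char *src, char *dst);    /* hypothetical */
int apply_entry(const char *entry);             /* hypothetical */

static int decode_input(const char *src, size_t len)
{
        char *buf = kzalloc(len + 1, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        if (parse_entry(src, buf) < 0)
                goto free_buffer;
        if (apply_entry(buf) < 0)
                goto free_buffer;

        kfree(buf);
        return 0;

free_buffer:
        kfree(buf);
        return -1;
}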
index e446a09c886b1a2ca1344d87f755806b237fe406..f65e5e584212faa2fbe360bb345d9c422acb4457 100644 (file)
@@ -25,8 +25,6 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
 
-#define OFFLOAD_BUF_SIZE       32768
-
 #ifdef DEBUG
 static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
 {
index 594d07a1e995ec87d467f4286a1d8668f2e32e3d..4b34f71547c689e21a98530c8eb1417b0de20cc7 100644 (file)
@@ -90,10 +90,10 @@ int iscsit_load_discovery_tpg(void)
         */
        param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
        if (!param)
-               goto out;
+               goto free_pl_out;
 
        if (iscsi_update_param_value(param, "CHAP,None") < 0)
-               goto out;
+               goto free_pl_out;
 
        tpg->tpg_attrib.authentication = 0;
 
@@ -105,6 +105,8 @@ int iscsit_load_discovery_tpg(void)
        pr_debug("CORE[0] - Allocated Discovery TPG\n");
 
        return 0;
+free_pl_out:
+       iscsi_release_param_list(tpg->param_list);
 out:
        if (tpg->sid == 1)
                core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -119,6 +121,7 @@ void iscsit_release_discovery_tpg(void)
        if (!tpg)
                return;
 
+       iscsi_release_param_list(tpg->param_list);
        core_tpg_deregister(&tpg->tpg_se_tpg);
 
        kfree(tpg);
index 54f20f184dd6b5c8422f8e72f81e173c70d5efd0..4435bf374d2d55fd79d9dc75dbd086a7ba6dbc28 100644 (file)
@@ -695,6 +695,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
        struct iscsi_session *sess;
        struct se_cmd *se_cmd = &cmd->se_cmd;
 
+       WARN_ON(!list_empty(&cmd->i_conn_node));
+
        if (cmd->conn)
                sess = cmd->conn->sess;
        else
@@ -717,6 +719,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
 {
        struct iscsi_conn *conn = cmd->conn;
 
+       WARN_ON(!list_empty(&cmd->i_conn_node));
+
        if (cmd->data_direction == DMA_TO_DEVICE) {
                iscsit_stop_dataout_timer(cmd);
                iscsit_free_r2ts_from_list(cmd);
index 928127642574b2d4b90592dfcd3688477a7d7a96..e46ca968009c06a2958e347104168cca32c37278 100644 (file)
@@ -918,7 +918,7 @@ static int core_alua_update_tpg_primary_metadata(
 {
        unsigned char *md_buf;
        struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
-       char path[ALUA_METADATA_PATH_LEN];
+       char *path;
        int len, rc;
 
        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
@@ -927,8 +927,6 @@ static int core_alua_update_tpg_primary_metadata(
                return -ENOMEM;
        }
 
-       memset(path, 0, ALUA_METADATA_PATH_LEN);
-
        len = snprintf(md_buf, ALUA_MD_BUF_LEN,
                        "tg_pt_gp_id=%hu\n"
                        "alua_access_state=0x%02x\n"
@@ -937,11 +935,14 @@ static int core_alua_update_tpg_primary_metadata(
                        tg_pt_gp->tg_pt_gp_alua_access_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);
 
-       snprintf(path, ALUA_METADATA_PATH_LEN,
-               "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
-               config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
-       rc = core_alua_write_tpg_metadata(path, md_buf, len);
+       rc = -ENOMEM;
+       path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+                       &wwn->unit_serial[0],
+                       config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+       if (path) {
+               rc = core_alua_write_tpg_metadata(path, md_buf, len);
+               kfree(path);
+       }
        kfree(md_buf);
        return rc;
 }
@@ -1209,7 +1210,7 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
 {
        struct se_portal_group *se_tpg = lun->lun_tpg;
        unsigned char *md_buf;
-       char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+       char *path;
        int len, rc;
 
        mutex_lock(&lun->lun_tg_pt_md_mutex);
@@ -1221,28 +1222,32 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
                goto out_unlock;
        }
 
-       memset(path, 0, ALUA_METADATA_PATH_LEN);
-       memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
-
-       len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
-                       se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
-
-       if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
-               snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
-                               se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
        len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
                        "alua_tg_pt_status=0x%02x\n",
                        atomic_read(&lun->lun_tg_pt_secondary_offline),
                        lun->lun_tg_pt_secondary_stat);
 
-       snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
-                       db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
-                       lun->unpacked_lun);
+       if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+               path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+                               db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+                               se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+                               se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+                               lun->unpacked_lun);
+       } else {
+               path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+                               db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+                               se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+                               lun->unpacked_lun);
+       }
+       if (!path) {
+               rc = -ENOMEM;
+               goto out_free;
+       }
 
        rc = core_alua_write_tpg_metadata(path, md_buf, len);
+       kfree(path);
+out_free:
        kfree(md_buf);
-
 out_unlock:
        mutex_unlock(&lun->lun_tg_pt_md_mutex);
        return rc;
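Both ALUA metadata helpers above drop their fixed 512-byte on-stack path buffers (and the now-unused path-length constants removed in the header hunk that follows) in favour of kasprintf(), which allocates a buffer sized to the formatted string; the cost is an extra kfree() and an -ENOMEM path. A stripped-down sketch of that pattern, with a hypothetical write_file() helper in place of core_alua_write_tpg_metadata():

#include <linux/slab.h>         /* kasprintf(), kfree() */
#include <linux/errno.h>

int write_file(const char *path, const char *buf, int len);    /* hypothetical */

static int write_metadata(const char *root, const char *wwn, u64 lun,
                          const char *buf, int len)
{
        char *path;
        int rc = -ENOMEM;

        /* Allocation is sized to the formatted string, no fixed PATH_LEN. */
        path = kasprintf(GFP_KERNEL, "%s/alua/%s/lun_%llu", root, wwn, lun);
        if (path) {
                rc = write_file(path, buf, len);
                kfree(path);
        }
        return rc;
}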
index 1902cb5c3b52c32de28290199012e9c2ae500e4a..fc9637cce82564dfb2d8b83b160cf1cd8c8b5760 100644 (file)
  */
 #define ALUA_DEFAULT_IMPLICIT_TRANS_SECS                       0
 #define ALUA_MAX_IMPLICIT_TRANS_SECS                   255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN                         512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN                        256
 
 /* Used by core_alua_update_tpg_(primary,secondary)_metadata */
 #define ALUA_MD_BUF_LEN                                        1024
index bd87cc26c6e500cdb813732c291f4d33bbc46964..72b1cd1bf9d9fdcfc64084f0177df66230e77450 100644 (file)
@@ -1611,12 +1611,12 @@ static match_table_t tokens = {
        {Opt_res_type, "res_type=%d"},
        {Opt_res_scope, "res_scope=%d"},
        {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
-       {Opt_mapped_lun, "mapped_lun=%lld"},
+       {Opt_mapped_lun, "mapped_lun=%u"},
        {Opt_target_fabric, "target_fabric=%s"},
        {Opt_target_node, "target_node=%s"},
        {Opt_tpgt, "tpgt=%d"},
        {Opt_port_rtpi, "port_rtpi=%d"},
-       {Opt_target_lun, "target_lun=%lld"},
+       {Opt_target_lun, "target_lun=%u"},
        {Opt_err, NULL}
 };
 
@@ -1693,7 +1693,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
                        }
                        break;
                case Opt_sa_res_key:
-                       ret = kstrtoull(args->from, 0, &tmp_ll);
+                       ret = match_u64(args,  &tmp_ll);
                        if (ret < 0) {
                                pr_err("kstrtoull() failed for sa_res_key=\n");
                                goto out;
@@ -1727,10 +1727,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
                        all_tg_pt = (int)arg;
                        break;
                case Opt_mapped_lun:
-                       ret = match_int(args, &arg);
+                       ret = match_u64(args, &tmp_ll);
                        if (ret)
                                goto out;
-                       mapped_lun = (u64)arg;
+                       mapped_lun = (u64)tmp_ll;
                        break;
                /*
                 * PR APTPL Metadata for Target Port
@@ -1768,10 +1768,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
                                goto out;
                        break;
                case Opt_target_lun:
-                       ret = match_int(args, &arg);
+                       ret = match_u64(args, &tmp_ll);
                        if (ret)
                                goto out;
-                       target_lun = (u64)arg;
+                       target_lun = (u64)tmp_ll;
                        break;
                default:
                        break;
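The configfs hunk above is about truncation: mapped_lun and target_lun are 64-bit values, so parsing them with match_int() into an int and casting back up to u64 could silently lose bits, which is why the token patterns also switch from %lld to %u and the parsing moves to match_u64(), reading the substring straight into a u64. A condensed sketch of the parser usage, with a hypothetical one-entry token table:

#include <linux/parser.h>
#include <linux/types.h>
#include <linux/errno.h>

enum { Opt_mapped_lun, Opt_err };

static const match_table_t demo_tokens = {      /* hypothetical table */
        {Opt_mapped_lun, "mapped_lun=%u"},
        {Opt_err, NULL}
};

static int parse_mapped_lun(char *opt, u64 *lun)
{
        substring_t args[MAX_OPT_ARGS];

        if (match_token(opt, demo_tokens, args) != Opt_mapped_lun)
                return -EINVAL;

        /* Parses directly into a u64; no int intermediate, no cast. */
        return match_u64(&args[0], lun);
}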
index e9e917cc6441913326316e75cc42dab99cf353c3..e1416b007aa43e0dcbd40d3b4d5e720c42e355c4 100644 (file)
@@ -623,8 +623,6 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
        NULL,
 };
 
-extern struct configfs_item_operations target_core_dev_item_ops;
-
 static int target_fabric_port_link(
        struct config_item *lun_ci,
        struct config_item *se_dev_ci)
index c629817a8854bea49a18c6b3f93f3f8923f99079..9b2c0c773022c0013de3bbce849ca78af117b2c5 100644 (file)
@@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
        struct inode *inode = file->f_mapping->host;
        int ret;
 
+       if (!nolb) {
+               return 0;
+       }
+
        if (cmd->se_dev->dev_attrib.pi_prot_type) {
                ret = fd_do_prot_unmap(cmd, lba, nolb);
                if (ret)
index 18e3eb16e756735f7fe8028715c4f090979dcb2a..9384d19a7326c81274e589a58cd0a56b4f8bb98a 100644 (file)
@@ -89,6 +89,7 @@ int   target_for_each_device(int (*fn)(struct se_device *dev, void *data),
                               void *data);
 
 /* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
 void   target_setup_backend_cits(struct target_backend *);
 
 /* target_core_fabric_configfs.c */
index dd2cd8048582ce7520661b5ec3477a8e431b0f86..b024613f921718a40c761ea0f8eb7befe28dd168 100644 (file)
@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
        char *buf,
        u32 size)
 {
-       if (!pr_reg->isid_present_at_reg)
+       if (!pr_reg->isid_present_at_reg) {
                buf[0] = '\0';
+               return;
+       }
 
        snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
 }
@@ -351,6 +353,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
                break;
        case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
                we = 1;
+               /* fall through */
        case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
                /*
                 * Some commands are only allowed for registered I_T Nexuses.
@@ -359,6 +362,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
                break;
        case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
                we = 1;
+               /* fall through */
        case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
                /*
                 * Each registered I_T Nexus is a reservation holder.
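The two /* fall through */ lines above add no behaviour: the WRITE_EXCLUSIVE REGONLY/ALLREG cases deliberately continue into the case below them after setting we = 1, and the comment records that intent in a form GCC's -Wimplicit-fallthrough and static-analysis tools recognise. In miniature, with hypothetical names:

/* Hypothetical miniature of the same annotation. */
static int classify(int type)
{
        int write_excl = 0;

        switch (type) {
        case 1:                 /* write exclusive, registrants only */
                write_excl = 1;
                /* fall through */
        case 2:                 /* exclusive access, registrants only */
                return write_excl ? 10 : 20;
        default:
                return -1;
        }
}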
@@ -1521,7 +1525,7 @@ core_scsi3_decode_spec_i_port(
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
                pr_err("Unable to allocate tidh_new\n");
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
        }
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
@@ -1533,7 +1537,7 @@ core_scsi3_decode_spec_i_port(
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
        }
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
@@ -1553,7 +1557,7 @@ core_scsi3_decode_spec_i_port(
 
        buf = transport_kmap_data_sg(cmd);
        if (!buf) {
-               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                goto out;
        }
 
@@ -1767,7 +1771,7 @@ core_scsi3_decode_spec_i_port(
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        kfree(tidh_new);
-                       ret = TCM_INVALID_PARAMETER_LIST;
+                       ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                        goto out_unmap;
                }
                tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1971,24 +1975,21 @@ static int __core_scsi3_write_aptpl_to_file(
        struct t10_wwn *wwn = &dev->t10_wwn;
        struct file *file;
        int flags = O_RDWR | O_CREAT | O_TRUNC;
-       char path[512];
+       char *path;
        u32 pr_aptpl_buf_len;
        int ret;
        loff_t pos = 0;
 
-       memset(path, 0, 512);
-
-       if (strlen(&wwn->unit_serial[0]) >= 512) {
-               pr_err("WWN value for struct se_device does not fit"
-                       " into path buffer\n");
-               return -EMSGSIZE;
-       }
+       path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+                       &wwn->unit_serial[0]);
+       if (!path)
+               return -ENOMEM;
 
-       snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
        file = filp_open(path, flags, 0600);
        if (IS_ERR(file)) {
                pr_err("filp_open(%s) for APTPL metadata"
                        " failed\n", path);
+               kfree(path);
                return PTR_ERR(file);
        }
 
@@ -1999,6 +2000,7 @@ static int __core_scsi3_write_aptpl_to_file(
        if (ret < 0)
                pr_debug("Error writing APTPL metadata file: %s\n", path);
        fput(file);
+       kfree(path);
 
        return (ret < 0) ? -EIO : 0;
 }
@@ -2103,7 +2105,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
                                        register_type, 0)) {
                                pr_err("Unable to allocate"
                                        " struct t10_pr_registration\n");
-                               return TCM_INVALID_PARAMETER_LIST;
+                               return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                        }
                } else {
                        /*
@@ -3215,7 +3217,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
         */
        buf = transport_kmap_data_sg(cmd);
        if (!buf) {
-               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                goto out_put_pr_reg;
        }
 
@@ -3267,7 +3269,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
 
        buf = transport_kmap_data_sg(cmd);
        if (!buf) {
-               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                goto out_put_pr_reg;
        }
        proto_ident = (buf[24] & 0x0f);
@@ -3466,7 +3468,7 @@ after_iport_check:
                if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
                                        dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
                                        iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
-                       ret = TCM_INVALID_PARAMETER_LIST;
+                       ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
                        goto out;
                }
                spin_lock(&dev->dev_reservation_lock);
@@ -3528,8 +3530,6 @@ after_iport_check:
 
        core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
 
-       transport_kunmap_data_sg(cmd);
-
        core_scsi3_put_pr_reg(dest_pr_reg);
        return 0;
 out:
@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                 * Set the ADDITIONAL DESCRIPTOR LENGTH
                 */
                put_unaligned_be32(desc_len, &buf[off]);
+               off += 4;
                /*
                 * Size of full desctipor header minus TransportID
                 * containing $FABRIC_MOD specific) initiator device/port
index e22847bd79b95b9bfb76a12a6747a0d5b913e44d..9c7bc1ca341a6821582b79e0916901346b841515 100644 (file)
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
                spin_unlock(&se_cmd->t_state_lock);
                return false;
        }
+       if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+               if (se_cmd->scsi_status) {
+                       pr_debug("Attempted to abort io tag: %llu early failure"
+                                " status: 0x%02x\n", se_cmd->tag,
+                                se_cmd->scsi_status);
+                       spin_unlock(&se_cmd->t_state_lock);
+                       return false;
+               }
+       }
        if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
                pr_debug("Attempted to abort io tag: %llu already shutdown,"
                        " skipping\n", se_cmd->tag);
@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
         * LUN_RESET tmr..
         */
        spin_lock_irqsave(&dev->se_tmr_lock, flags);
-       list_del_init(&tmr->tmr_list);
+       if (tmr)
+               list_del_init(&tmr->tmr_list);
        list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
                cmd = tmr_p->task_cmd;
                if (!cmd) {
index 836d552b0385e978bc1a0b98c59a3379c262fd61..58caacd54a3b2a650061d558097d1179e1031035 100644 (file)
@@ -67,7 +67,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev, int err, bool write_pending);
-static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
@@ -668,7 +667,7 @@ int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
        if (transport_cmd_check_stop_to_fabric(cmd))
                return 1;
        if (remove && ack_kref)
-               ret = transport_put_cmd(cmd);
+               ret = target_put_sess_cmd(cmd);
 
        return ret;
 }
@@ -1730,9 +1729,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 {
        int ret = 0, post_ret = 0;
 
-       if (transport_check_aborted_status(cmd, 1))
-               return;
-
        pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
                 sense_reason);
        target_show_cmd("-----[ ", cmd);
@@ -1741,6 +1737,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
         * For SAM Task Attribute emulation for failed struct se_cmd
         */
        transport_complete_task_attr(cmd);
+
        /*
         * Handle special case for COMPARE_AND_WRITE failure, where the
         * callback is expected to drop the per device ->caw_sem.
@@ -1749,6 +1746,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
             cmd->transport_complete_callback)
                cmd->transport_complete_callback(cmd, false, &post_ret);
 
+       if (transport_check_aborted_status(cmd, 1))
+               return;
+
        switch (sense_reason) {
        case TCM_NON_EXISTENT_LUN:
        case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1772,8 +1772,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
                break;
        case TCM_OUT_OF_RESOURCES:
-               sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               break;
+               cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+               goto queue_status;
        case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
@@ -1795,11 +1795,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
                                               cmd->orig_fe_lun, 0x2C,
                                        ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
                }
-               trace_target_cmd_complete(cmd);
-               ret = cmd->se_tfo->queue_status(cmd);
-               if (ret)
-                       goto queue_full;
-               goto check_stop;
+
+               goto queue_status;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
                        cmd->t_task_cdb[0], sense_reason);
@@ -1816,6 +1813,11 @@ check_stop:
        transport_cmd_check_stop_to_fabric(cmd);
        return;
 
+queue_status:
+       trace_target_cmd_complete(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (!ret)
+               goto check_stop;
 queue_full:
        transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
@@ -1973,6 +1975,7 @@ void target_execute_cmd(struct se_cmd *cmd)
        }
 
        cmd->t_state = TRANSPORT_PROCESSING;
+       cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
        cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
        spin_unlock_irq(&cmd->t_state_lock);
 
@@ -2010,6 +2013,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
                list_del(&cmd->se_delayed_node);
                spin_unlock(&dev->delayed_cmd_lock);
 
+               cmd->transport_state |= CMD_T_SENT;
+
                __target_execute_cmd(cmd, true);
 
                if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2045,6 +2050,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
                         dev->dev_cur_ordered_id);
        }
+       cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
 restart:
        target_restart_delayed_cmds(dev);
 }
@@ -2090,7 +2097,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
                        ret = cmd->se_tfo->queue_data_in(cmd);
                        break;
                }
-               /* Fall through for DMA_TO_DEVICE */
+               /* fall through */
        case DMA_NONE:
 queue_status:
                trace_target_cmd_complete(cmd);
@@ -2268,7 +2275,7 @@ queue_rsp:
                                goto queue_full;
                        break;
                }
-               /* Fall through for DMA_TO_DEVICE */
+               /* fall through */
        case DMA_NONE:
 queue_status:
                trace_target_cmd_complete(cmd);
@@ -2352,22 +2359,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
        cmd->t_bidi_data_nents = 0;
 }
 
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd:       command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
-       BUG_ON(!cmd->se_tfo);
-       /*
-        * If this cmd has been setup with target_get_sess_cmd(), drop
-        * the kref and call ->release_cmd() in kref callback.
-        */
-       return target_put_sess_cmd(cmd);
-}
-
 void *transport_kmap_data_sg(struct se_cmd *cmd)
 {
        struct scatterlist *sg = cmd->t_data_sg;
@@ -2570,7 +2561,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
 
 static void transport_write_pending_qf(struct se_cmd *cmd)
 {
+       unsigned long flags;
        int ret;
+       bool stop;
+
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+       if (stop) {
+               pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
+                       __func__, __LINE__, cmd->tag);
+               complete_all(&cmd->t_transport_stop_comp);
+               return;
+       }
 
        ret = cmd->se_tfo->write_pending(cmd);
        if (ret) {
@@ -2603,7 +2607,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
                        target_wait_free_cmd(cmd, &aborted, &tas);
 
                if (!aborted || tas)
-                       ret = transport_put_cmd(cmd);
+                       ret = target_put_sess_cmd(cmd);
        } else {
                if (wait_for_tasks)
                        target_wait_free_cmd(cmd, &aborted, &tas);
@@ -2619,7 +2623,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
                        transport_lun_remove_cmd(cmd);
 
                if (!aborted || tas)
-                       ret = transport_put_cmd(cmd);
+                       ret = target_put_sess_cmd(cmd);
        }
        /*
         * If the task has been internally aborted due to TMR ABORT_TASK
@@ -2664,6 +2668,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
                ret = -ESHUTDOWN;
                goto out;
        }
+       se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
        list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
 out:
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -3145,6 +3150,21 @@ static const struct sense_info sense_info_table[] = {
                .key = NOT_READY,
                .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
        },
+       [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+               /*
+                * From spc4r22 section5.7.7,5.7.8
+                * If a PERSISTENT RESERVE OUT command with a REGISTER service action
+                * or a REGISTER AND IGNORE EXISTING KEY service action or
+                * REGISTER AND MOVE service actionis attempted,
+                * but there are insufficient device server resources to complete the
+                * operation, then the command shall be terminated with CHECK CONDITION
+                * status, with the sense key set to ILLEGAL REQUEST,and the additonal
+                * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+                */
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x55,
+               .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+       },
 };
 
 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
index 9469695f5871aea064bea2e4b4f65e32742c3cb4..a415d87f22d24237f1ae67539cfbb91a33ddbc9d 100644 (file)
@@ -150,6 +150,8 @@ struct tcmu_dev {
        wait_queue_head_t nl_cmd_wq;
 
        char dev_config[TCMU_CONFIG_LEN];
+
+       int nl_reply_supported;
 };
 
 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@ -430,7 +432,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
-       int cmd_id;
 
        tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
        if (!tcmu_cmd)
@@ -438,9 +439,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 
        tcmu_cmd->se_cmd = se_cmd;
        tcmu_cmd->tcmu_dev = udev;
-       if (udev->cmd_time_out)
-               tcmu_cmd->deadline = jiffies +
-                                       msecs_to_jiffies(udev->cmd_time_out);
 
        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
        tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
@@ -451,19 +449,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
                return NULL;
        }
 
-       idr_preload(GFP_KERNEL);
-       spin_lock_irq(&udev->commands_lock);
-       cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
-               USHRT_MAX, GFP_NOWAIT);
-       spin_unlock_irq(&udev->commands_lock);
-       idr_preload_end();
-
-       if (cmd_id < 0) {
-               tcmu_free_cmd(tcmu_cmd);
-               return NULL;
-       }
-       tcmu_cmd->cmd_id = cmd_id;
-
        return tcmu_cmd;
 }
 
@@ -746,6 +731,30 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
        return command_size;
 }
 
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+{
+       struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+       unsigned long tmo = udev->cmd_time_out;
+       int cmd_id;
+
+       if (tcmu_cmd->cmd_id)
+               return 0;
+
+       cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+       if (cmd_id < 0) {
+               pr_err("tcmu: Could not allocate cmd id.\n");
+               return cmd_id;
+       }
+       tcmu_cmd->cmd_id = cmd_id;
+
+       if (!tmo)
+               return 0;
+
+       tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+       mod_timer(&udev->timeout, tcmu_cmd->deadline);
+       return 0;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -839,7 +848,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        entry = (void *) mb + CMDR_OFF + cmd_head;
        memset(entry, 0, command_size);
        tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
-       entry->hdr.cmd_id = tcmu_cmd->cmd_id;
 
        /* Handle allocating space from the data area */
        tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -877,6 +885,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        }
        entry->req.iov_bidi_cnt = iov_cnt;
 
+       ret = tcmu_setup_cmd_timer(tcmu_cmd);
+       if (ret) {
+               tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+               mutex_unlock(&udev->cmdr_lock);
+               return TCM_OUT_OF_RESOURCES;
+       }
+       entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+
        /*
         * Recalaulate the command's base size and size according
         * to the actual needs
@@ -910,8 +926,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 static sense_reason_t
 tcmu_queue_cmd(struct se_cmd *se_cmd)
 {
-       struct se_device *se_dev = se_cmd->se_dev;
-       struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
        sense_reason_t ret;
 
@@ -922,9 +936,6 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
        ret = tcmu_queue_cmd_ring(tcmu_cmd);
        if (ret != TCM_NO_SENSE) {
                pr_err("TCMU: Could not queue command\n");
-               spin_lock_irq(&udev->commands_lock);
-               idr_remove(&udev->commands, tcmu_cmd->cmd_id);
-               spin_unlock_irq(&udev->commands_lock);
 
                tcmu_free_cmd(tcmu_cmd);
        }
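The tcmu hunks above move command-id allocation out of tcmu_alloc_cmd() and into tcmu_setup_cmd_timer(), called only once the command is actually being placed on the ring; the idr range now starts at 1, so a cmd_id of 0 can mean "not assigned yet", and the failure path no longer needs the idr_remove() dropped just above. A bare-bones sketch of that allocation step (hypothetical names, locking omitted):

#include <linux/idr.h>
#include <linux/kernel.h>       /* USHRT_MAX */

static int assign_cmd_id(struct idr *commands, void *cmd, u16 *cmd_id)
{
        int id;

        if (*cmd_id)            /* already assigned on an earlier attempt */
                return 0;

        /* Start at 1 so that 0 keeps meaning "no id yet". */
        id = idr_alloc(commands, cmd, 1, USHRT_MAX, GFP_NOWAIT);
        if (id < 0)
                return id;

        *cmd_id = id;
        return 0;
}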
@@ -1044,9 +1055,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
        return 0;
 }
 
-static void tcmu_device_timedout(unsigned long data)
+static void tcmu_device_timedout(struct timer_list *t)
 {
-       struct tcmu_dev *udev = (struct tcmu_dev *)data;
+       struct tcmu_dev *udev = from_timer(udev, t, timeout);
        unsigned long flags;
 
        spin_lock_irqsave(&udev->commands_lock, flags);
@@ -1106,12 +1117,13 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        idr_init(&udev->commands);
        spin_lock_init(&udev->commands_lock);
 
-       setup_timer(&udev->timeout, tcmu_device_timedout,
-               (unsigned long)udev);
+       timer_setup(&udev->timeout, tcmu_device_timedout, 0);
 
        init_waitqueue_head(&udev->nl_cmd_wq);
        spin_lock_init(&udev->nl_cmd_lock);
 
+       INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+
        return &udev->se_dev;
 }
 
@@ -1280,10 +1292,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
        kfree(udev);
 }
 
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+       if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+               kmem_cache_free(tcmu_cmd_cache, cmd);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static void tcmu_blocks_release(struct tcmu_dev *udev)
+{
+       int i;
+       struct page *page;
+
+       /* Try to release all block pages */
+       mutex_lock(&udev->cmdr_lock);
+       for (i = 0; i <= udev->dbi_max; i++) {
+               page = radix_tree_delete(&udev->data_blocks, i);
+               if (page) {
+                       __free_page(page);
+                       atomic_dec(&global_db_count);
+               }
+       }
+       mutex_unlock(&udev->cmdr_lock);
+}
+
 static void tcmu_dev_kref_release(struct kref *kref)
 {
        struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
        struct se_device *dev = &udev->se_dev;
+       struct tcmu_cmd *cmd;
+       bool all_expired = true;
+       int i;
+
+       vfree(udev->mb_addr);
+       udev->mb_addr = NULL;
+
+       /* Upper layer should drain all requests before calling this */
+       spin_lock_irq(&udev->commands_lock);
+       idr_for_each_entry(&udev->commands, cmd, i) {
+               if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+                       all_expired = false;
+       }
+       idr_destroy(&udev->commands);
+       spin_unlock_irq(&udev->commands_lock);
+       WARN_ON(!all_expired);
+
+       tcmu_blocks_release(udev);
 
        call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
 }
@@ -1306,6 +1362,10 @@ static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
 
        if (!tcmu_kern_cmd_reply_supported)
                return;
+
+       if (udev->nl_reply_supported <= 0)
+               return;
+
 relock:
        spin_lock(&udev->nl_cmd_lock);
 
@@ -1332,6 +1392,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
        if (!tcmu_kern_cmd_reply_supported)
                return 0;
 
+       if (udev->nl_reply_supported <= 0)
+               return 0;
+
        pr_debug("sleeping for nl reply\n");
        wait_for_completion(&nl_cmd->complete);
 
@@ -1476,8 +1539,6 @@ static int tcmu_configure_device(struct se_device *dev)
        WARN_ON(udev->data_size % PAGE_SIZE);
        WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
 
-       INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
-
        info->version = __stringify(TCMU_MAILBOX_VERSION);
 
        info->mem[0].name = "tcm-user command & data buffer";
@@ -1506,6 +1567,12 @@ static int tcmu_configure_device(struct se_device *dev)
                dev->dev_attrib.emulate_write_cache = 0;
        dev->dev_attrib.hw_queue_depth = 128;
 
+       /* If user didn't explicitly disable netlink reply support, use
+        * module scope setting.
+        */
+       if (udev->nl_reply_supported >= 0)
+               udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
        /*
         * Get a ref incase userspace does a close on the uio device before
         * LIO has initiated tcmu_free_device.
@@ -1527,6 +1594,7 @@ err_netlink:
        uio_unregister_device(&udev->uio_info);
 err_register:
        vfree(udev->mb_addr);
+       udev->mb_addr = NULL;
 err_vzalloc:
        kfree(info->name);
        info->name = NULL;
@@ -1534,37 +1602,11 @@ err_vzalloc:
        return ret;
 }
 
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
-       if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
-               kmem_cache_free(tcmu_cmd_cache, cmd);
-               return 0;
-       }
-       return -EINVAL;
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
        return udev->uio_info.uio_dev ? true : false;
 }
 
-static void tcmu_blocks_release(struct tcmu_dev *udev)
-{
-       int i;
-       struct page *page;
-
-       /* Try to release all block pages */
-       mutex_lock(&udev->cmdr_lock);
-       for (i = 0; i <= udev->dbi_max; i++) {
-               page = radix_tree_delete(&udev->data_blocks, i);
-               if (page) {
-                       __free_page(page);
-                       atomic_dec(&global_db_count);
-               }
-       }
-       mutex_unlock(&udev->cmdr_lock);
-}
-
 static void tcmu_free_device(struct se_device *dev)
 {
        struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1576,9 +1618,6 @@ static void tcmu_free_device(struct se_device *dev)
 static void tcmu_destroy_device(struct se_device *dev)
 {
        struct tcmu_dev *udev = TCMU_DEV(dev);
-       struct tcmu_cmd *cmd;
-       bool all_expired = true;
-       int i;
 
        del_timer_sync(&udev->timeout);
 
@@ -1586,20 +1625,6 @@ static void tcmu_destroy_device(struct se_device *dev)
        list_del(&udev->node);
        mutex_unlock(&root_udev_mutex);
 
-       vfree(udev->mb_addr);
-
-       /* Upper layer should drain all requests before calling this */
-       spin_lock_irq(&udev->commands_lock);
-       idr_for_each_entry(&udev->commands, cmd, i) {
-               if (tcmu_check_and_free_pending_cmd(cmd) != 0)
-                       all_expired = false;
-       }
-       idr_destroy(&udev->commands);
-       spin_unlock_irq(&udev->commands_lock);
-       WARN_ON(!all_expired);
-
-       tcmu_blocks_release(udev);
-
        tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
 
        uio_unregister_device(&udev->uio_info);
@@ -1610,7 +1635,7 @@ static void tcmu_destroy_device(struct se_device *dev)
 
 enum {
        Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
-       Opt_err,
+       Opt_nl_reply_supported, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -1618,6 +1643,7 @@ static match_table_t tokens = {
        {Opt_dev_size, "dev_size=%u"},
        {Opt_hw_block_size, "hw_block_size=%u"},
        {Opt_hw_max_sectors, "hw_max_sectors=%u"},
+       {Opt_nl_reply_supported, "nl_reply_supported=%d"},
        {Opt_err, NULL}
 };
 
@@ -1692,6 +1718,17 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                        ret = tcmu_set_dev_attrib(&args[0],
                                        &(dev->dev_attrib.hw_max_sectors));
                        break;
+               case Opt_nl_reply_supported:
+                       arg_p = match_strdup(&args[0]);
+                       if (!arg_p) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
+                       kfree(arg_p);
+                       if (ret < 0)
+                               pr_err("kstrtoint() failed for nl_reply_supported=\n");
+                       break;
                default:
                        break;
                }
@@ -1734,8 +1771,7 @@ static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
 {
        struct se_dev_attrib *da = container_of(to_config_group(item),
                                        struct se_dev_attrib, da_group);
-       struct tcmu_dev *udev = container_of(da->da_dev,
-                                       struct tcmu_dev, se_dev);
+       struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
 
        return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
 }
@@ -1842,6 +1878,34 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
 }
 CONFIGFS_ATTR(tcmu_, dev_size);
 
+static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
+               char *page)
+{
+       struct se_dev_attrib *da = container_of(to_config_group(item),
+                                               struct se_dev_attrib, da_group);
+       struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+       return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
+}
+
+static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct se_dev_attrib *da = container_of(to_config_group(item),
+                                               struct se_dev_attrib, da_group);
+       struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+       s8 val;
+       int ret;
+
+       ret = kstrtos8(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       udev->nl_reply_supported = val;
+       return count;
+}
+CONFIGFS_ATTR(tcmu_, nl_reply_supported);
+
 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
                                             char *page)
 {
@@ -1884,6 +1948,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
        &tcmu_attr_dev_config,
        &tcmu_attr_dev_size,
        &tcmu_attr_emulate_write_cache,
+       &tcmu_attr_nl_reply_supported,
        NULL,
 };
 
index 5d442469c95e94a1ea1e5c634eb6bb62a92d48b7..cf0bde3bb927439a126913935d89bcdbc5fb6941 100644 (file)
@@ -279,7 +279,7 @@ static unsigned detect_isa_irq(void __iomem *);
 #endif                         /* CONFIG_ISA */
 
 #ifndef CONFIG_CYZ_INTR
-static void cyz_poll(unsigned long);
+static void cyz_poll(struct timer_list *);
 
 /* The Cyclades-Z polling cycle is defined by this variable */
 static long cyz_polling_cycle = CZ_DEF_POLL;
@@ -1214,7 +1214,7 @@ static void cyz_rx_restart(struct timer_list *t)
 
 #else                          /* CONFIG_CYZ_INTR */
 
-static void cyz_poll(unsigned long arg)
+static void cyz_poll(struct timer_list *unused)
 {
        struct cyclades_card *cinfo;
        struct cyclades_port *info;
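The hunk above is the first of a long run of near-identical conversions through the rest of this section: the tty and serial drivers move their timer callbacks from the old void (*)(unsigned long) prototype with setup_timer() to the newer timer_setup()/from_timer() API, in which the callback receives the struct timer_list pointer and recovers its containing structure via a container_of()-style lookup, so the unsigned-long data cast (and, in the 8250 hunk, the TIMER_FUNC_TYPE cast) goes away. A minimal sketch of the converted form, using a hypothetical structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_dev {                       /* hypothetical device */
        struct timer_list poll_timer;
        int polls;
};

static void demo_poll(struct timer_list *t)
{
        /* from_timer() is container_of() keyed on the timer field name. */
        struct demo_dev *dev = from_timer(dev, t, poll_timer);

        dev->polls++;
        mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void demo_start(struct demo_dev *dev)
{
        timer_setup(&dev->poll_timer, demo_poll, 0);
        mod_timer(&dev->poll_timer, jiffies + HZ);
}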
index a6b8240af6cdd6dcfb6bbb07e39e39bc13bf4f7b..b0baa4ce10f9897d5f2284c7aa6e50c6dd5bb7de 100644 (file)
@@ -33,7 +33,7 @@ static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
                                         unsigned int address,
                                         const unsigned char *data, int len,
                                         int is_last);
-static void ipwireless_setup_timer(unsigned long data);
+static void ipwireless_setup_timer(struct timer_list *t);
 static void handle_received_CTRL_packet(struct ipw_hardware *hw,
                unsigned int channel_idx, const unsigned char *data, int len);
 
@@ -1635,8 +1635,7 @@ struct ipw_hardware *ipwireless_hardware_create(void)
        spin_lock_init(&hw->lock);
        tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
        INIT_WORK(&hw->work_rx, ipw_receive_data_work);
-       setup_timer(&hw->setup_timer, ipwireless_setup_timer,
-                       (unsigned long) hw);
+       timer_setup(&hw->setup_timer, ipwireless_setup_timer, 0);
 
        return hw;
 }
@@ -1670,12 +1669,12 @@ void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
        hw->init_loops = 0;
        printk(KERN_INFO IPWIRELESS_PCCARD_NAME
               ": waiting for card to start up...\n");
-       ipwireless_setup_timer((unsigned long) hw);
+       ipwireless_setup_timer(&hw->setup_timer);
 }
 
-static void ipwireless_setup_timer(unsigned long data)
+static void ipwireless_setup_timer(struct timer_list *t)
 {
-       struct ipw_hardware *hw = (struct ipw_hardware *) data;
+       struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
 
        hw->init_loops++;
 
index ee7958ab269f901fb6e5ae44d285a3e7f7a0bbaa..015686ff48255fedbf0705111f9abc2afd99e63b 100644 (file)
@@ -170,7 +170,7 @@ static struct pci_driver isicom_driver = {
 static int prev_card = 3;      /*      start servicing isi_card[0]     */
 static struct tty_driver *isicom_normal;
 
-static void isicom_tx(unsigned long _data);
+static void isicom_tx(struct timer_list *unused);
 static void isicom_start(struct tty_struct *tty);
 
 static DEFINE_TIMER(tx, isicom_tx);
@@ -394,7 +394,7 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
  *     will do the rest of the work for us.
  */
 
-static void isicom_tx(unsigned long _data)
+static void isicom_tx(struct timer_list *unused)
 {
        unsigned long flags, base;
        unsigned int retries;
index 65a70f3c7cde2b2472e39d795fd65ef81f521648..68cbc03aab4b86e49a3f1f8904de52ef91f4b5c3 100644 (file)
@@ -198,7 +198,7 @@ static void moxa_hangup(struct tty_struct *);
 static int moxa_tiocmget(struct tty_struct *tty);
 static int moxa_tiocmset(struct tty_struct *tty,
                         unsigned int set, unsigned int clear);
-static void moxa_poll(unsigned long);
+static void moxa_poll(struct timer_list *);
 static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
 static void moxa_shutdown(struct tty_port *);
 static int moxa_carrier_raised(struct tty_port *);
@@ -1429,7 +1429,7 @@ put:
        return 0;
 }
 
-static void moxa_poll(unsigned long ignored)
+static void moxa_poll(struct timer_list *unused)
 {
        struct moxa_board_conf *brd;
        u16 __iomem *ip;
index 3a39eb685c693d4f3a85df528a0a3cded9be442b..5131bdc9e765037f882203f1b9b6afa8bf01656c 100644 (file)
@@ -1310,9 +1310,9 @@ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
  *     gsm->pending_cmd will be NULL and we just let the timer expire.
  */
 
-static void gsm_control_retransmit(unsigned long data)
+static void gsm_control_retransmit(struct timer_list *t)
 {
-       struct gsm_mux *gsm = (struct gsm_mux *)data;
+       struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
        struct gsm_control *ctrl;
        unsigned long flags;
        spin_lock_irqsave(&gsm->control_lock, flags);
@@ -1453,9 +1453,9 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
  *     end will get a DM response)
  */
 
-static void gsm_dlci_t1(unsigned long data)
+static void gsm_dlci_t1(struct timer_list *t)
 {
-       struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+       struct gsm_dlci *dlci = from_timer(dlci, t, t1);
        struct gsm_mux *gsm = dlci->gsm;
 
        switch (dlci->state) {
@@ -1634,7 +1634,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
        }
 
        skb_queue_head_init(&dlci->skb_list);
-       setup_timer(&dlci->t1, gsm_dlci_t1, (unsigned long)dlci);
+       timer_setup(&dlci->t1, gsm_dlci_t1, 0);
        tty_port_init(&dlci->port);
        dlci->port.ops = &gsm_port_ops;
        dlci->gsm = gsm;
@@ -2088,7 +2088,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
        struct gsm_dlci *dlci;
        int i = 0;
 
-       setup_timer(&gsm->t2_timer, gsm_control_retransmit, (unsigned long)gsm);
+       timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
        init_waitqueue_head(&gsm->event);
        spin_lock_init(&gsm->control_lock);
        spin_lock_init(&gsm->tx_lock);
index 9f246d4db3caa605c8811df37c27bf098fd17e6f..30bb0900cd2f5b5c83089202b62af6add9c45e25 100644 (file)
@@ -115,7 +115,7 @@ static void retry_transmit(struct r3964_info *pInfo);
 static void transmit_block(struct r3964_info *pInfo);
 static void receive_char(struct r3964_info *pInfo, const unsigned char c);
 static void receive_error(struct r3964_info *pInfo, const char flag);
-static void on_timeout(unsigned long priv);
+static void on_timeout(struct timer_list *t);
 static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
 static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
                unsigned char __user * buf);
@@ -688,9 +688,9 @@ static void receive_error(struct r3964_info *pInfo, const char flag)
        }
 }
 
-static void on_timeout(unsigned long priv)
+static void on_timeout(struct timer_list *t)
 {
-       struct r3964_info *pInfo = (void *)priv;
+       struct r3964_info *pInfo = from_timer(pInfo, t, tmr);
 
        switch (pInfo->state) {
        case R3964_TX_REQUEST:
@@ -993,7 +993,7 @@ static int r3964_open(struct tty_struct *tty)
        tty->disc_data = pInfo;
        tty->receive_room = 65536;
 
-       setup_timer(&pInfo->tmr, on_timeout, (unsigned long)pInfo);
+       timer_setup(&pInfo->tmr, on_timeout, 0);
 
        return 0;
 }
index f7dc9b1ea806857cdc3083ed5d9b09aeef43a18c..bdd17d2aaafd957d81b382eb16e8fa1f814bb004 100644 (file)
@@ -86,7 +86,7 @@
 
 /****** RocketPort Local Variables ******/
 
-static void rp_do_poll(unsigned long dummy);
+static void rp_do_poll(struct timer_list *unused);
 
 static struct tty_driver *rocket_driver;
 
@@ -525,7 +525,7 @@ static void rp_handle_port(struct r_port *info)
 /*
  *  The top level polling routine.  Repeats every 1/100 HZ (10ms).
  */
-static void rp_do_poll(unsigned long dummy)
+static void rp_do_poll(struct timer_list *unused)
 {
        CONTROLLER_t *ctlp;
        int ctrl, aiop, ch, line;
index d64afdd93872c28580f55c4c120ce117dd530d69..9342fc2ee7dfe292bee10f607434055504401b28 100644 (file)
@@ -325,7 +325,7 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
        if (up->bugs & UART_BUG_THRE) {
                pr_debug("ttyS%d - using backup timer\n", serial_index(port));
 
-               up->timer.function = (TIMER_FUNC_TYPE)serial8250_backup_timeout;
+               up->timer.function = serial8250_backup_timeout;
                mod_timer(&up->timer, jiffies +
                          uart_poll_timeout(port) + HZ / 5);
        }
@@ -348,7 +348,7 @@ static void univ8250_release_irq(struct uart_8250_port *up)
        struct uart_port *port = &up->port;
 
        del_timer_sync(&up->timer);
-       up->timer.function = (TIMER_FUNC_TYPE)serial8250_timeout;
+       up->timer.function = serial8250_timeout;
        if (port->irq)
                serial_unlink_irq_chain(up);
 }
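
The serial8250 hunks above drop the transitional (TIMER_FUNC_TYPE) cast: once a callback already has the struct timer_list * prototype it can be assigned to timer.function directly before re-arming the timer. A sketch of that, with a hypothetical callback name:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_backup_timeout(struct timer_list *t)
{
        /* ... poll the hardware that failed to interrupt ... */
}

static void demo_arm_backup(struct timer_list *timer)
{
        /* No cast needed: the callback uses the new prototype natively. */
        timer->function = demo_backup_timeout;
        mod_timer(timer, jiffies + HZ / 5);
}
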
index 1421804975e0b08115232b3c0d7d8430fa46bf24..c9458a033e3cc0ca325e84e1c1fe4778086e9d09 100644 (file)
@@ -2059,7 +2059,7 @@ static void flush_timeout_function(unsigned long data)
 static struct timer_list flush_timer;
 
 static void
-timed_flush_handler(unsigned long ptr)
+timed_flush_handler(struct timer_list *unused)
 {
        struct e100_serial *info;
        int i;
@@ -4137,7 +4137,7 @@ static int __init rs_init(void)
        /* Setup the timed flush handler system */
 
 #if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
-       setup_timer(&flush_timer, timed_flush_handler, 0);
+       timer_setup(&flush_timer, timed_flush_handler, 0);
        mod_timer(&flush_timer, jiffies + 5);
 #endif
 
index c84e6f0db54e16662b0a258af90bdad3af280ee9..1c4d3f38713863f2ebc2e5e7f13ff5f38ac0715d 100644 (file)
@@ -966,9 +966,9 @@ static void lpuart_dma_rx_complete(void *arg)
        lpuart_copy_rx_to_tty(sport);
 }
 
-static void lpuart_timer_func(unsigned long data)
+static void lpuart_timer_func(struct timer_list *t)
 {
-       struct lpuart_port *sport = (struct lpuart_port *)data;
+       struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
 
        lpuart_copy_rx_to_tty(sport);
 }
@@ -1263,8 +1263,7 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
 
 static void rx_dma_timer_init(struct lpuart_port *sport)
 {
-               setup_timer(&sport->lpuart_timer, lpuart_timer_func,
-                               (unsigned long)sport);
+               timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
                sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
                add_timer(&sport->lpuart_timer);
 }
index 473f4f81d690c6ab21a768c0b0745347fc4d9aa5..ffefd218761e04c633171b3f1b14151e7f05b721 100644 (file)
@@ -263,9 +263,9 @@ static void mrdy_assert(struct ifx_spi_device *ifx_dev)
  *     The SPI has timed out: hang up the tty. Users will then see a hangup
  *     and error events.
  */
-static void ifx_spi_timeout(unsigned long arg)
+static void ifx_spi_timeout(struct timer_list *t)
 {
-       struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
+       struct ifx_spi_device *ifx_dev = from_timer(ifx_dev, t, spi_timer);
 
        dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
        tty_port_tty_hangup(&ifx_dev->tty_port, false);
@@ -1016,8 +1016,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
        spin_lock_init(&ifx_dev->write_lock);
        spin_lock_init(&ifx_dev->power_lock);
        ifx_dev->power_status = 0;
-       setup_timer(&ifx_dev->spi_timer, ifx_spi_timeout,
-                   (unsigned long)ifx_dev);
+       timer_setup(&ifx_dev->spi_timer, ifx_spi_timeout, 0);
        ifx_dev->modem = pl_data->modem_type;
        ifx_dev->use_dma = pl_data->use_dma;
        ifx_dev->max_hz = pl_data->max_hz;
index a67a606c38eb0066565c9e1f8fb26ed9cb744980..e4b3d9123a03312b3985e8f1db7840e13e6a38e3 100644 (file)
@@ -906,9 +906,9 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
  * This is our per-port timeout handler, for checking the
  * modem status signals.
  */
-static void imx_timeout(unsigned long data)
+static void imx_timeout(struct timer_list *t)
 {
-       struct imx_port *sport = (struct imx_port *)data;
+       struct imx_port *sport = from_timer(sport, t, timer);
        unsigned long flags;
 
        if (sport->port.state) {
@@ -2082,7 +2082,7 @@ static int serial_imx_probe(struct platform_device *pdev)
        sport->port.rs485_config = imx_rs485_config;
        sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
        sport->port.flags = UPF_BOOT_AUTOCONF;
-       setup_timer(&sport->timer, imx_timeout, (unsigned long)sport);
+       timer_setup(&sport->timer, imx_timeout, 0);
 
        sport->gpios = mctrl_gpio_init(&sport->port, 0);
        if (IS_ERR(sport->gpios))
index ed2b0305862727296d5d5f8109a4a43022cda761..4029272891f9d6e4385d01b03dbb73b53d7026e5 100644 (file)
@@ -188,9 +188,9 @@ bool kgdb_nmi_poll_knock(void)
  * The tasklet is cheap, it does not cause wakeups when reschedules itself,
  * instead it waits for the next tick.
  */
-static void kgdb_nmi_tty_receiver(unsigned long data)
+static void kgdb_nmi_tty_receiver(struct timer_list *t)
 {
-       struct kgdb_nmi_tty_priv *priv = (void *)data;
+       struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
        char ch;
 
        priv->timer.expires = jiffies + (HZ/100);
@@ -241,7 +241,7 @@ static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty)
                return -ENOMEM;
 
        INIT_KFIFO(priv->fifo);
-       setup_timer(&priv->timer, kgdb_nmi_tty_receiver, (unsigned long)priv);
+       timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
        tty_port_init(&priv->port);
        priv->port.ops = &kgdb_nmi_tty_port_ops;
        tty->driver_data = priv;
index 27d6049eb6a9a210b7317f40ea1a7d92118cab23..371569a0fd00a8161f76ac715a734368e2419e60 100644 (file)
@@ -178,9 +178,9 @@ static void max3100_dowork(struct max3100_port *s)
                queue_work(s->workqueue, &s->work);
 }
 
-static void max3100_timeout(unsigned long data)
+static void max3100_timeout(struct timer_list *t)
 {
-       struct max3100_port *s = (struct max3100_port *)data;
+       struct max3100_port *s = from_timer(s, t, timer);
 
        if (s->port.state) {
                max3100_dowork(s);
@@ -780,8 +780,7 @@ static int max3100_probe(struct spi_device *spi)
                max3100s[i]->poll_time = 1;
        max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
        max3100s[i]->minor = i;
-       setup_timer(&max3100s[i]->timer, max3100_timeout,
-                   (unsigned long)max3100s[i]);
+       timer_setup(&max3100s[i]->timer, max3100_timeout, 0);
 
        dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
        max3100s[i]->port.irq = max3100s[i]->irq;
index 3b74369c262f1d19829dc5aceec7bb8b0b76cbae..00ce31e8d19ad852ed025f2928ef22ae603e4ea8 100644 (file)
@@ -371,7 +371,7 @@ static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
  *
  * This function periodically polls the Serial MUX to check for new data.
  */
-static void mux_poll(unsigned long unused)
+static void mux_poll(struct timer_list *unused)
 {  
        int i;
 
@@ -572,7 +572,7 @@ static int __init mux_init(void)
 
        if(port_cnt > 0) {
                /* Start the Mux timer */
-               setup_timer(&mux_timer, mux_poll, 0UL);
+               timer_setup(&mux_timer, mux_poll, 0);
                mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
 
 #ifdef CONFIG_SERIAL_MUX_CONSOLE
index f8812389b8a8bbc7ab95b5d68c7b9a17b99f7596..223a9499104e2ffc49a928aa4a6caa26cd07db8c 100644 (file)
@@ -103,9 +103,9 @@ static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport)
  * This is our per-port timeout handler, for checking the
  * modem status signals.
  */
-static void pnx8xxx_timeout(unsigned long data)
+static void pnx8xxx_timeout(struct timer_list *t)
 {
-       struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
+       struct pnx8xxx_port *sport = from_timer(sport, t, timer);
        unsigned long flags;
 
        if (sport->port.state) {
@@ -662,8 +662,7 @@ static void __init pnx8xxx_init_ports(void)
        first = 0;
 
        for (i = 0; i < NR_PORTS; i++) {
-               setup_timer(&pnx8xxx_ports[i].timer, pnx8xxx_timeout,
-                           (unsigned long)&pnx8xxx_ports[i]);
+               timer_setup(&pnx8xxx_ports[i].timer, pnx8xxx_timeout, 0);
                pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
        }
 }
index 4e3f169b30cffdee4513848d22e0008eef65d989..a399772be3fc5342de88d3d202a05b32758c1d0c 100644 (file)
@@ -110,9 +110,9 @@ static void sa1100_mctrl_check(struct sa1100_port *sport)
  * This is our per-port timeout handler, for checking the
  * modem status signals.
  */
-static void sa1100_timeout(unsigned long data)
+static void sa1100_timeout(struct timer_list *t)
 {
-       struct sa1100_port *sport = (struct sa1100_port *)data;
+       struct sa1100_port *sport = from_timer(sport, t, timer);
        unsigned long flags;
 
        if (sport->port.state) {
@@ -627,8 +627,7 @@ static void __init sa1100_init_ports(void)
                sa1100_ports[i].port.fifosize  = 8;
                sa1100_ports[i].port.line      = i;
                sa1100_ports[i].port.iotype    = UPIO_MEM;
-               setup_timer(&sa1100_ports[i].timer, sa1100_timeout,
-                           (unsigned long)&sa1100_ports[i]);
+               timer_setup(&sa1100_ports[i].timer, sa1100_timeout, 0);
        }
 
        /*
index 31fcc7072a90d43b7b96d9603ef5e40a680bf61d..d9f399c4e90c0dfa957bb8cbc80b99c8ef27f454 100644 (file)
@@ -1058,9 +1058,9 @@ static int scif_rtrg_enabled(struct uart_port *port)
                        (SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
 }
 
-static void rx_fifo_timer_fn(unsigned long arg)
+static void rx_fifo_timer_fn(struct timer_list *t)
 {
-       struct sci_port *s = (struct sci_port *)arg;
+       struct sci_port *s = from_timer(s, t, rx_fifo_timer);
        struct uart_port *port = &s->port;
 
        dev_dbg(port->dev, "Rx timed out\n");
@@ -1138,8 +1138,7 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
                sci->rx_fifo_timeout = r;
                scif_set_rtrg(port, 1);
                if (r > 0)
-                       setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
-                                   (unsigned long)sci);
+                       timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
        }
 
        return count;
@@ -1392,9 +1391,9 @@ static void work_fn_tx(struct work_struct *work)
        dma_async_issue_pending(chan);
 }
 
-static void rx_timer_fn(unsigned long arg)
+static void rx_timer_fn(struct timer_list *t)
 {
-       struct sci_port *s = (struct sci_port *)arg;
+       struct sci_port *s = from_timer(s, t, rx_timer);
        struct dma_chan *chan = s->chan_rx;
        struct uart_port *port = &s->port;
        struct dma_tx_state state;
@@ -1572,7 +1571,7 @@ static void sci_request_dma(struct uart_port *port)
                        dma += s->buf_len_rx;
                }
 
-               setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
+               timer_setup(&s->rx_timer, rx_timer_fn, 0);
 
                if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
                        sci_submit_rx(s);
@@ -2238,8 +2237,7 @@ static void sci_reset(struct uart_port *port)
        if (s->rx_trigger > 1) {
                if (s->rx_fifo_timeout) {
                        scif_set_rtrg(port, 1);
-                       setup_timer(&s->rx_fifo_timer, rx_fifo_timer_fn,
-                                   (unsigned long)s);
+                       timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
                } else {
                        if (port->type == PORT_SCIFA ||
                            port->type == PORT_SCIFB)
index ed78542c4c37a2b509831790a39e36569c71768a..42b9aded4eb1c6d47a9795d743a27af9627fa563 100644 (file)
@@ -612,9 +612,9 @@ static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
  * Obviously not used in interrupt mode
  *
  */
-static void sn_sal_timer_poll(unsigned long data)
+static void sn_sal_timer_poll(struct timer_list *t)
 {
-       struct sn_cons_port *port = (struct sn_cons_port *)data;
+       struct sn_cons_port *port = from_timer(port, t, sc_timer);
        unsigned long flags;
 
        if (!port)
@@ -668,7 +668,7 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
         * timer to poll for input and push data from the console
         * buffer.
         */
-       setup_timer(&port->sc_timer, sn_sal_timer_poll, (unsigned long)port);
+       timer_setup(&port->sc_timer, sn_sal_timer_poll, 0);
 
        if (IS_RUNNING_ON_SIMULATOR())
                port->sc_interrupt_timeout = 6;
index f2c34d65614462f395627dc981c19928ce77fb7f..3c4ad71f261d67144a489b4b138d8814c4b448ef 100644 (file)
@@ -700,7 +700,7 @@ static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
 
 static void usc_loopback_frame( struct mgsl_struct *info );
 
-static void mgsl_tx_timeout(unsigned long context);
+static void mgsl_tx_timeout(struct timer_list *t);
 
 
 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
@@ -1768,7 +1768,7 @@ static int startup(struct mgsl_struct * info)
        
        memset(&info->icount, 0, sizeof(info->icount));
 
-       setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
+       timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
        
        /* Allocate and claim adapter resources */
        retval = mgsl_claim_resources(info);
@@ -7517,9 +7517,9 @@ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int coun
  * Arguments:  context         pointer to device instance data
  * Return Value:       None
  */
-static void mgsl_tx_timeout(unsigned long context)
+static void mgsl_tx_timeout(struct timer_list *t)
 {
-       struct mgsl_struct *info = (struct mgsl_struct*)context;
+       struct mgsl_struct *info = from_timer(info, t, tx_timer);
        unsigned long flags;
        
        if ( debug_level >= DEBUG_LEVEL_INFO )
index 06a03731bba70a59e437da22c262100dd635ce6f..255c496878778d060067bc7a2b8b79459af73819 100644 (file)
@@ -493,8 +493,8 @@ static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
 static int  alloc_tmp_rbuf(struct slgt_info *info);
 static void free_tmp_rbuf(struct slgt_info *info);
 
-static void tx_timeout(unsigned long context);
-static void rx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void rx_timeout(struct timer_list *t);
 
 /*
  * ioctl handlers
@@ -3597,8 +3597,8 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
                info->adapter_num = adapter_num;
                info->port_num = port_num;
 
-               setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
-               setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
+               timer_setup(&info->tx_timer, tx_timeout, 0);
+               timer_setup(&info->rx_timer, rx_timeout, 0);
 
                /* Copy configuration info to device instance data */
                info->pdev = pdev;
@@ -5112,9 +5112,9 @@ static int adapter_test(struct slgt_info *info)
 /*
  * transmit timeout handler
  */
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
 {
-       struct slgt_info *info = (struct slgt_info*)context;
+       struct slgt_info *info = from_timer(info, t, tx_timer);
        unsigned long flags;
 
        DBGINFO(("%s tx_timeout\n", info->device_name));
@@ -5136,9 +5136,9 @@ static void tx_timeout(unsigned long context)
 /*
  * receive buffer polling timer
  */
-static void rx_timeout(unsigned long context)
+static void rx_timeout(struct timer_list *t)
 {
-       struct slgt_info *info = (struct slgt_info*)context;
+       struct slgt_info *info = from_timer(info, t, rx_timer);
        unsigned long flags;
 
        DBGINFO(("%s rx_timeout\n", info->device_name));
index d45f234e1914b81205af50df7419e45f61cf9d51..75f11ce1f0a1ac5fe8754ae2948783c51e24481f 100644 (file)
@@ -615,8 +615,8 @@ static void free_tmp_rx_buf(SLMP_INFO *info);
 
 static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count);
 static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit);
-static void tx_timeout(unsigned long context);
-static void status_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void status_timeout(struct timer_list *t);
 
 static unsigned char read_reg(SLMP_INFO *info, unsigned char addr);
 static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val);
@@ -3782,9 +3782,8 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
                info->bus_type = MGSL_BUS_TYPE_PCI;
                info->irq_flags = IRQF_SHARED;
 
-               setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
-               setup_timer(&info->status_timer, status_timeout,
-                               (unsigned long)info);
+               timer_setup(&info->tx_timer, tx_timeout, 0);
+               timer_setup(&info->status_timer, status_timeout, 0);
 
                /* Store the PCI9050 misc control register value because a flaw
                 * in the PCI9050 prevents LCR registers from being read if
@@ -5468,9 +5467,9 @@ static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
 /* called when HDLC frame times out
  * update stats and do tx completion processing
  */
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
 {
-       SLMP_INFO *info = (SLMP_INFO*)context;
+       SLMP_INFO *info = from_timer(info, t, tx_timer);
        unsigned long flags;
 
        if ( debug_level >= DEBUG_LEVEL_INFO )
@@ -5495,10 +5494,10 @@ static void tx_timeout(unsigned long context)
 
 /* called to periodically check the DSR/RI modem signal input status
  */
-static void status_timeout(unsigned long context)
+static void status_timeout(struct timer_list *t)
 {
        u16 status = 0;
-       SLMP_INFO *info = (SLMP_INFO*)context;
+       SLMP_INFO *info = from_timer(info, t, status_timer);
        unsigned long flags;
        unsigned char delta;
 
index c8d90d7e7e3766802bbdccb8ea5c990a0a08343a..5d412df8e94372217726271c7f83e1f55444ae21 100644 (file)
@@ -244,7 +244,7 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
        return 0;
 }
 
-static void kd_nosound(unsigned long ignored)
+static void kd_nosound(struct timer_list *unused)
 {
        static unsigned int zero;
 
index bce4c71cb33883fc67c230497cba9cfa2958e931..88b902c525d7455e3e0398623d5c54dc65ad0ab9 100644 (file)
@@ -158,7 +158,7 @@ static void set_cursor(struct vc_data *vc);
 static void hide_cursor(struct vc_data *vc);
 static void console_callback(struct work_struct *ignored);
 static void con_driver_unregister_callback(struct work_struct *ignored);
-static void blank_screen_t(unsigned long dummy);
+static void blank_screen_t(struct timer_list *unused);
 static void set_palette(struct vc_data *vc);
 
 #define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
@@ -3929,7 +3929,7 @@ void unblank_screen(void)
  * (console operations can still happen at irq time, but only from printk which
  * has the console mutex. Not perfect yet, but better than no locking
  */
-static void blank_screen_t(unsigned long dummy)
+static void blank_screen_t(struct timer_list *unused)
 {
        blank_timer_expired = 1;
        schedule_work(&console_work);
index 6470d259b7d8a2071052dc52a38b70626ff170db..8af797252af206c9ad70aef7d65ee8106df2107f 100644 (file)
@@ -547,21 +547,30 @@ static void cxacru_blocking_completion(struct urb *urb)
        complete(urb->context);
 }
 
-static void cxacru_timeout_kill(unsigned long data)
+struct cxacru_timer {
+       struct timer_list timer;
+       struct urb *urb;
+};
+
+static void cxacru_timeout_kill(struct timer_list *t)
 {
-       usb_unlink_urb((struct urb *) data);
+       struct cxacru_timer *timer = from_timer(timer, t, timer);
+
+       usb_unlink_urb(timer->urb);
 }
 
 static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
                                 int *actual_length)
 {
-       struct timer_list timer;
+       struct cxacru_timer timer = {
+               .urb = urb,
+       };
 
-       setup_timer(&timer, cxacru_timeout_kill, (unsigned long)urb);
-       timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
-       add_timer(&timer);
+       timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0);
+       mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT));
        wait_for_completion(done);
-       del_timer_sync(&timer);
+       del_timer_sync(&timer.timer);
+       destroy_timer_on_stack(&timer.timer);
 
        if (actual_length)
                *actual_length = urb->actual_length;
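
The cxacru hunk above also shows the idiom for an on-stack timer that still needs extra context now that callbacks no longer take an opaque data argument: the timer is wrapped in a small struct together with that context, and the on-stack setup/teardown helpers are used. A sketch of the idiom under hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/completion.h>

struct demo_wait {                      /* hypothetical wrapper, for illustration */
        struct timer_list timer;        /* on-stack timer */
        struct completion *done;        /* extra context the callback needs */
};

static void demo_wait_timeout(struct timer_list *t)
{
        struct demo_wait *w = from_timer(w, t, timer);

        complete(w->done);              /* act on the carried context */
}

static void demo_wait_with_timeout(struct completion *done, unsigned int ms)
{
        struct demo_wait w = { .done = done };

        /* On-stack timers need the _on_stack variants so timer debugging
         * knows the object lives on the stack. */
        timer_setup_on_stack(&w.timer, demo_wait_timeout, 0);
        mod_timer(&w.timer, jiffies + msecs_to_jiffies(ms));
        wait_for_completion(done);
        del_timer_sync(&w.timer);
        destroy_timer_on_stack(&w.timer);
}
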
index 5a5e8c0aaa3994ad1a110928720a378d6b6cd9e2..973548b5c15ce11414cec038b4b47001c055c9f2 100644 (file)
@@ -557,9 +557,10 @@ static void speedtch_check_status(struct work_struct *work)
        }
 }
 
-static void speedtch_status_poll(unsigned long data)
+static void speedtch_status_poll(struct timer_list *t)
 {
-       struct speedtch_instance_data *instance = (void *)data;
+       struct speedtch_instance_data *instance = from_timer(instance, t,
+                                                            status_check_timer);
 
        schedule_work(&instance->status_check_work);
 
@@ -570,9 +571,10 @@ static void speedtch_status_poll(unsigned long data)
                atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
 }
 
-static void speedtch_resubmit_int(unsigned long data)
+static void speedtch_resubmit_int(struct timer_list *t)
 {
-       struct speedtch_instance_data *instance = (void *)data;
+       struct speedtch_instance_data *instance = from_timer(instance, t,
+                                                            resubmit_timer);
        struct urb *int_urb = instance->int_urb;
        int ret;
 
@@ -860,13 +862,11 @@ static int speedtch_bind(struct usbatm_data *usbatm,
        usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
 
        INIT_WORK(&instance->status_check_work, speedtch_check_status);
-       setup_timer(&instance->status_check_timer, speedtch_status_poll,
-                   (unsigned long)instance);
+       timer_setup(&instance->status_check_timer, speedtch_status_poll, 0);
        instance->last_status = 0xff;
        instance->poll_delay = MIN_POLL_DELAY;
 
-       setup_timer(&instance->resubmit_timer, speedtch_resubmit_int,
-                   (unsigned long)instance);
+       timer_setup(&instance->resubmit_timer, speedtch_resubmit_int, 0);
 
        instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
 
index 044264aa1f965ade715e3df2fa3ba50a6d5b5543..dbea28495e1ddb49193c9acae8b15a8f08cfb0c6 100644 (file)
@@ -989,18 +989,18 @@ static int usbatm_heavy_init(struct usbatm_data *instance)
        return 0;
 }
 
-static void usbatm_tasklet_schedule(unsigned long data)
+static void usbatm_tasklet_schedule(struct timer_list *t)
 {
-       tasklet_schedule((struct tasklet_struct *) data);
+       struct usbatm_channel *channel = from_timer(channel, t, delay);
+
+       tasklet_schedule(&channel->tasklet);
 }
 
 static void usbatm_init_channel(struct usbatm_channel *channel)
 {
        spin_lock_init(&channel->lock);
        INIT_LIST_HEAD(&channel->list);
-       channel->delay.function = usbatm_tasklet_schedule;
-       channel->delay.data = (unsigned long) &channel->tasklet;
-       init_timer(&channel->delay);
+       timer_setup(&channel->delay, usbatm_tasklet_schedule, 0);
 }
 
 int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
index 19b5c4afeef205931226f0350680977de25d5f47..fc32391a34d5db0c4951d2f1a50c4be8ccb97f94 100644 (file)
@@ -788,9 +788,11 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
 EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
 
 /* timer callback */
-static void rh_timer_func (unsigned long _hcd)
+static void rh_timer_func (struct timer_list *t)
 {
-       usb_hcd_poll_rh_status((struct usb_hcd *) _hcd);
+       struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
+
+       usb_hcd_poll_rh_status(_hcd);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -2545,7 +2547,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
        hcd->self.bus_name = bus_name;
        hcd->self.uses_dma = (sysdev->dma_mask != NULL);
 
-       setup_timer(&hcd->rh_timer, rh_timer_func, (unsigned long)hcd);
+       timer_setup(&hcd->rh_timer, rh_timer_func, 0);
 #ifdef CONFIG_PM
        INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
 #endif
index 69eb40cd1b47ec195cec2f7a195b8a5051ed4f6a..7b6eb0ad513b26b33610c9b08531f8accac4a89f 100644 (file)
@@ -3314,9 +3314,9 @@ host:
        }
 }
 
-static void dwc2_wakeup_detected(unsigned long data)
+static void dwc2_wakeup_detected(struct timer_list *t)
 {
-       struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
+       struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
        u32 hprt0;
 
        dev_dbg(hsotg->dev, "%s()\n", __func__);
@@ -5155,8 +5155,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        }
        INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
 
-       setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
-                   (unsigned long)hsotg);
+       timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
 
        /* Initialize the non-periodic schedule */
        INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
index f472de238ac26ad9824f0322f0f80fc78c89f3c4..fcd1676c7f0b7eae8b942aa9a50f6bcfb8cdc813 100644 (file)
@@ -1275,9 +1275,9 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
  *
  * @work: Pointer to a qh unreserve_work.
  */
-static void dwc2_unreserve_timer_fn(unsigned long data)
+static void dwc2_unreserve_timer_fn(struct timer_list *t)
 {
-       struct dwc2_qh *qh = (struct dwc2_qh *)data;
+       struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
        struct dwc2_hsotg *hsotg = qh->hsotg;
        unsigned long flags;
 
@@ -1467,8 +1467,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 
        /* Initialize QH */
        qh->hsotg = hsotg;
-       setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
-                   (unsigned long)qh);
+       timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
        qh->ep_type = ep_type;
        qh->ep_is_in = ep_is_in;
 
index bfe278294e889058aa642077b78dd0b97263f7ff..ad743a8493be2eceabb2031a48a834c702b823b9 100644 (file)
@@ -1550,9 +1550,9 @@ static void at91_vbus_timer_work(struct work_struct *work)
                mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
 }
 
-static void at91_vbus_timer(unsigned long data)
+static void at91_vbus_timer(struct timer_list *t)
 {
-       struct at91_udc *udc = (struct at91_udc *)data;
+       struct at91_udc *udc = from_timer(udc, t, vbus_timer);
 
        /*
         * If we are polling vbus it is likely that the gpio is on an
@@ -1918,8 +1918,7 @@ static int at91udc_probe(struct platform_device *pdev)
 
                if (udc->board.vbus_polled) {
                        INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
-                       setup_timer(&udc->vbus_timer, at91_vbus_timer,
-                                   (unsigned long)udc);
+                       timer_setup(&udc->vbus_timer, at91_vbus_timer, 0);
                        mod_timer(&udc->vbus_timer,
                                  jiffies + VBUS_POLL_TIMEOUT);
                } else {
index 4f1b1809472c41446b34904fb857a965f33c1afd..d0128f92ec5af312af880d7822a316587992b1a8 100644 (file)
@@ -1771,9 +1771,9 @@ static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
 /* drive both sides of the transfers; looks like irq handlers to
  * both drivers except the callbacks aren't in_irq().
  */
-static void dummy_timer(unsigned long _dum_hcd)
+static void dummy_timer(struct timer_list *t)
 {
-       struct dummy_hcd        *dum_hcd = (struct dummy_hcd *) _dum_hcd;
+       struct dummy_hcd        *dum_hcd = from_timer(dum_hcd, t, timer);
        struct dummy            *dum = dum_hcd->dum;
        struct urbp             *urbp, *tmp;
        unsigned long           flags;
@@ -2445,7 +2445,7 @@ static DEVICE_ATTR_RO(urbs);
 
 static int dummy_start_ss(struct dummy_hcd *dum_hcd)
 {
-       setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+       timer_setup(&dum_hcd->timer, dummy_timer, 0);
        dum_hcd->rh_state = DUMMY_RH_RUNNING;
        dum_hcd->stream_en_ep = 0;
        INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2474,7 +2474,7 @@ static int dummy_start(struct usb_hcd *hcd)
                return dummy_start_ss(dum_hcd);
 
        spin_lock_init(&dum_hcd->dum->lock);
-       setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+       timer_setup(&dum_hcd->timer, dummy_timer, 0);
        dum_hcd->rh_state = DUMMY_RH_RUNNING;
 
        INIT_LIST_HEAD(&dum_hcd->urbp_list);
index f19e6282a688d030e3304fcb17e227efbd30207a..a8288df6aadf09c5523d1922d6a71f0c646940fb 100644 (file)
@@ -1259,9 +1259,9 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
        return IRQ_HANDLED;
 }
 
-static void m66592_timer(unsigned long _m66592)
+static void m66592_timer(struct timer_list *t)
 {
-       struct m66592 *m66592 = (struct m66592 *)_m66592;
+       struct m66592 *m66592 = from_timer(m66592, t, timer);
        unsigned long flags;
        u16 tmp;
 
@@ -1589,7 +1589,7 @@ static int m66592_probe(struct platform_device *pdev)
        m66592->gadget.max_speed = USB_SPEED_HIGH;
        m66592->gadget.name = udc_name;
 
-       setup_timer(&m66592->timer, m66592_timer, (unsigned long)m66592);
+       timer_setup(&m66592->timer, m66592_timer, 0);
        m66592->reg = reg;
 
        ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
index fc7f810baef79e8a107dd8105656b4283b22f0d0..dc35a54bad9088589339539f88c736bb9f63bf7c 100644 (file)
@@ -1854,9 +1854,9 @@ static irqreturn_t omap_udc_irq(int irq, void *_udc)
 #define PIO_OUT_TIMEOUT        (jiffies + HZ/3)
 #define HALF_FULL(f)   (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
 
-static void pio_out_timer(unsigned long _ep)
+static void pio_out_timer(struct timer_list *t)
 {
-       struct omap_ep  *ep = (void *) _ep;
+       struct omap_ep  *ep = from_timer(ep, t, timer);
        unsigned long   flags;
        u16             stat_flg;
 
@@ -2542,9 +2542,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
                }
                if (dbuf && addr)
                        epn_rxtx |= UDC_EPN_RX_DB;
-               init_timer(&ep->timer);
-               ep->timer.function = pio_out_timer;
-               ep->timer.data = (unsigned long) ep;
+               timer_setup(&ep->timer, pio_out_timer, 0);
        }
        if (addr)
                epn_rxtx |= UDC_EPN_RX_VALID;
index 8f135d9fa245984e5f2a40108eb5bc159cb7d8fc..0e3f5faa000e93cca3349930111ea0072ed7989b 100644 (file)
@@ -1624,9 +1624,9 @@ static inline void clear_ep_state (struct pxa25x_udc *dev)
                nuke(&dev->ep[i], -ECONNABORTED);
 }
 
-static void udc_watchdog(unsigned long _dev)
+static void udc_watchdog(struct timer_list *t)
 {
-       struct pxa25x_udc       *dev = (void *)_dev;
+       struct pxa25x_udc       *dev = from_timer(dev, t, timer);
 
        local_irq_disable();
        if (dev->ep0state == EP0_STALL
@@ -2413,7 +2413,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
                gpio_direction_output(dev->mach->gpio_pullup, 0);
        }
 
-       setup_timer(&dev->timer, udc_watchdog, (unsigned long)dev);
+       timer_setup(&dev->timer, udc_watchdog, 0);
 
        the_controller = dev;
        platform_set_drvdata(pdev, dev);
index 143122ed3c6646fdefb439daf1288cf3d4b4297a..a3ecce62662ba6cdc1e0f7ee65e90b2445537b56 100644 (file)
@@ -1514,9 +1514,9 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
        return IRQ_HANDLED;
 }
 
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
 {
-       struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+       struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
        unsigned long flags;
        u16 tmp;
 
@@ -1874,7 +1874,7 @@ static int r8a66597_probe(struct platform_device *pdev)
        r8a66597->gadget.max_speed = USB_SPEED_HIGH;
        r8a66597->gadget.name = udc_name;
 
-       setup_timer(&r8a66597->timer, r8a66597_timer, (unsigned long)r8a66597);
+       timer_setup(&r8a66597->timer, r8a66597_timer, 0);
        r8a66597->reg = reg;
 
        if (r8a66597->pdata->on_chip) {
index 10887e09e9bc0ce916df1eee3fe217421e8199b0..ee96763493332458da2397bc85a24e8dfa5b70e6 100644 (file)
@@ -80,7 +80,7 @@ static const char     hcd_name [] = "ohci_hcd";
 
 static void ohci_dump(struct ohci_hcd *ohci);
 static void ohci_stop(struct usb_hcd *hcd);
-static void io_watchdog_func(unsigned long _ohci);
+static void io_watchdog_func(struct timer_list *t);
 
 #include "ohci-hub.c"
 #include "ohci-dbg.c"
@@ -500,8 +500,7 @@ static int ohci_init (struct ohci_hcd *ohci)
        if (ohci->hcca)
                return 0;
 
-       setup_timer(&ohci->io_watchdog, io_watchdog_func,
-                       (unsigned long) ohci);
+       timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
 
        ohci->hcca = dma_alloc_coherent (hcd->self.controller,
                        sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -723,9 +722,9 @@ static int ohci_start(struct usb_hcd *hcd)
  * the unlink list.  As a result, URBs could never be dequeued and
  * endpoints could never be released.
  */
-static void io_watchdog_func(unsigned long _ohci)
+static void io_watchdog_func(struct timer_list *t)
 {
-       struct ohci_hcd *ohci = (struct ohci_hcd *) _ohci;
+       struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog);
        bool            takeback_all_pending = false;
        u32             status;
        u32             head;
index 0bf7759aae789e9705cc52e90fbb5dca41d15701..c5e6e8d0b5ef5fe6428c7838df74e015262ddbe0 100644 (file)
@@ -2539,9 +2539,9 @@ static irqreturn_t oxu_irq(struct usb_hcd *hcd)
        return ret;
 }
 
-static void oxu_watchdog(unsigned long param)
+static void oxu_watchdog(struct timer_list *t)
 {
-       struct oxu_hcd  *oxu = (struct oxu_hcd *) param;
+       struct oxu_hcd  *oxu = from_timer(oxu, t, watchdog);
        unsigned long flags;
 
        spin_lock_irqsave(&oxu->lock, flags);
@@ -2577,7 +2577,7 @@ static int oxu_hcd_init(struct usb_hcd *hcd)
 
        spin_lock_init(&oxu->lock);
 
-       setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
+       timer_setup(&oxu->watchdog, oxu_watchdog, 0);
 
        /*
         * hw default: 1K periodic list heads, one per frame.
index f3d9ba420a97b5a3d932fa51a0952df40d5480be..984892dd72f550a2de7dafac8a2d64ca8a0ab215 100644 (file)
@@ -1798,9 +1798,9 @@ static void r8a66597_td_timer(struct timer_list *t)
        spin_unlock_irqrestore(&r8a66597->lock, flags);
 }
 
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
 {
-       struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+       struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
        unsigned long flags;
        int port;
 
@@ -2472,8 +2472,7 @@ static int r8a66597_probe(struct platform_device *pdev)
                r8a66597->max_root_hub = 2;
 
        spin_lock_init(&r8a66597->lock);
-       setup_timer(&r8a66597->rh_timer, r8a66597_timer,
-                   (unsigned long)r8a66597);
+       timer_setup(&r8a66597->rh_timer, r8a66597_timer, 0);
        r8a66597->reg = reg;
 
        /* make sure no interrupts are pending */
index 601fb00603cc1d4737f3cd0c3d238544447a4830..fa88a903fa2ea886e8f08e52e825049ae1b3864f 100644 (file)
@@ -1119,9 +1119,9 @@ sl811h_hub_descriptor (
 }
 
 static void
-sl811h_timer(unsigned long _sl811)
+sl811h_timer(struct timer_list *t)
 {
-       struct sl811    *sl811 = (void *) _sl811;
+       struct sl811    *sl811 = from_timer(sl811, t, timer);
        unsigned long   flags;
        u8              irqstat;
        u8              signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
@@ -1692,7 +1692,7 @@ sl811h_probe(struct platform_device *dev)
        spin_lock_init(&sl811->lock);
        INIT_LIST_HEAD(&sl811->async);
        sl811->board = dev_get_platdata(&dev->dev);
-       setup_timer(&sl811->timer, sl811h_timer, (unsigned long)sl811);
+       timer_setup(&sl811->timer, sl811h_timer, 0);
        sl811->addr_reg = addr_reg;
        sl811->data_reg = data_reg;
 
index babeefd84ffd06318f724abb835f3e537649d195..f5c90217777acb69aeb7a0623bffadeb264d582a 100644 (file)
@@ -585,8 +585,7 @@ static int uhci_start(struct usb_hcd *hcd)
                hcd->self.sg_tablesize = ~0;
 
        spin_lock_init(&uhci->lock);
-       setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
-                       (unsigned long) uhci);
+       timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
        INIT_LIST_HEAD(&uhci->idle_qh_list);
        init_waitqueue_head(&uhci->waitqh);
 
index 49d4edc03cc28bafef6ed4e20a0427c75148b4e7..d40438238938c0ecc04b58e72493ed426799b53d 100644 (file)
@@ -90,9 +90,9 @@ static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
        }
 }
 
-static void uhci_fsbr_timeout(unsigned long _uhci)
+static void uhci_fsbr_timeout(struct timer_list *t)
 {
-       struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
+       struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
        unsigned long flags;
 
        spin_lock_irqsave(&uhci->lock, flags);
index 327ba8b8a98b8db77252b419d13ff74fcebd7df5..2424d3020ca364b22792376e36c21462af3b2f62 100644 (file)
@@ -395,14 +395,14 @@ static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 
 #endif
 
-static void compliance_mode_recovery(unsigned long arg)
+static void compliance_mode_recovery(struct timer_list *t)
 {
        struct xhci_hcd *xhci;
        struct usb_hcd *hcd;
        u32 temp;
        int i;
 
-       xhci = (struct xhci_hcd *)arg;
+       xhci = from_timer(xhci, t, comp_mode_recovery_timer);
 
        for (i = 0; i < xhci->num_usb3_ports; i++) {
                temp = readl(xhci->usb3_ports[i]);
@@ -443,8 +443,8 @@ static void compliance_mode_recovery(unsigned long arg)
 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
 {
        xhci->port_status_u0 = 0;
-       setup_timer(&xhci->comp_mode_recovery_timer,
-                   compliance_mode_recovery, (unsigned long)xhci);
+       timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
+                   0);
        xhci->comp_mode_recovery_timer.expires = jiffies +
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
 
index a859c2d33c29137fd9c604f75d3f0b03c10d7cc5..fdceb46d9fc61a0c5eea2f113abd494dc4cc693b 100644 (file)
@@ -555,9 +555,9 @@ static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
                        val, reg, NULL, 0, MOS_WDR_TIMEOUT);
 }
 
-static void mos7840_led_off(unsigned long arg)
+static void mos7840_led_off(struct timer_list *t)
 {
-       struct moschip_port *mcs = (struct moschip_port *) arg;
+       struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
 
        /* Turn off LED */
        mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
@@ -565,9 +565,9 @@ static void mos7840_led_off(unsigned long arg)
                                jiffies + msecs_to_jiffies(LED_OFF_MS));
 }
 
-static void mos7840_led_flag_off(unsigned long arg)
+static void mos7840_led_flag_off(struct timer_list *t)
 {
-       struct moschip_port *mcs = (struct moschip_port *) arg;
+       struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
 
        clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
 }
@@ -2289,12 +2289,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
                        goto error;
                }
 
-               setup_timer(&mos7840_port->led_timer1, mos7840_led_off,
-                           (unsigned long)mos7840_port);
+               timer_setup(&mos7840_port->led_timer1, mos7840_led_off, 0);
                mos7840_port->led_timer1.expires =
                        jiffies + msecs_to_jiffies(LED_ON_MS);
-               setup_timer(&mos7840_port->led_timer2, mos7840_led_flag_off,
-                           (unsigned long)mos7840_port);
+               timer_setup(&mos7840_port->led_timer2, mos7840_led_flag_off,
+                           0);
                mos7840_port->led_timer2.expires =
                        jiffies + msecs_to_jiffies(LED_OFF_MS);
 
index 48e2e32c97e8a3d912ddb594b8c4c45b6b371749..31b0244419387c52ec5dcd13138b4d6a0bd063f7 100644 (file)
@@ -751,9 +751,9 @@ static void rts51x_modi_suspend_timer(struct rts51x_chip *chip)
        mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires);
 }
 
-static void rts51x_suspend_timer_fn(unsigned long data)
+static void rts51x_suspend_timer_fn(struct timer_list *t)
 {
-       struct rts51x_chip *chip = (struct rts51x_chip *)data;
+       struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
        struct us_data *us = chip->us;
 
        switch (rts51x_get_stat(chip)) {
@@ -917,8 +917,7 @@ static int realtek_cr_autosuspend_setup(struct us_data *us)
        us->proto_handler = rts51x_invoke_transport;
 
        chip->timer_expires = 0;
-       setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn,
-                       (unsigned long)chip);
+       timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0);
        fw5895_init(us);
 
        /* enable autosuspend function of the usb device */
index 38d0504a1bbc50a29a3d639a772648ef385e5692..625f706b8160c27067b106f50704d137c361cf78 100644 (file)
@@ -603,9 +603,9 @@ static void uwb_cnflt_update_work(struct work_struct *work)
        mutex_unlock(&rc->rsvs_mutex);
 }
 
-static void uwb_cnflt_timer(unsigned long arg)
+static void uwb_cnflt_timer(struct timer_list *t)
 {
-       struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
+       struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
 
        queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
 }
@@ -642,7 +642,7 @@ static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_i
        }
 
        INIT_LIST_HEAD(&cnflt->rc_node);
-       setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);
+       timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);
 
        cnflt->rc = rc;
        INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
index 36b5cb62c15dba7b734ffa61460fdeb3a31e79cc..fbdca728bd9f8b947fa9af19103232a0850655c7 100644 (file)
@@ -115,7 +115,7 @@ struct uwb_rc_neh {
        struct list_head list_node;
 };
 
-static void uwb_rc_neh_timer(unsigned long arg);
+static void uwb_rc_neh_timer(struct timer_list *t);
 
 static void uwb_rc_neh_release(struct kref *kref)
 {
@@ -223,7 +223,7 @@ struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
 
        kref_init(&neh->kref);
        INIT_LIST_HEAD(&neh->list_node);
-       setup_timer(&neh->timer, uwb_rc_neh_timer, (unsigned long)neh);
+       timer_setup(&neh->timer, uwb_rc_neh_timer, 0);
 
        neh->rc = rc;
        neh->evt_type = expected_type;
@@ -565,9 +565,9 @@ void uwb_rc_neh_error(struct uwb_rc *rc, int error)
 EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
 
 
-static void uwb_rc_neh_timer(unsigned long arg)
+static void uwb_rc_neh_timer(struct timer_list *t)
 {
-       struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
+       struct uwb_rc_neh *neh = from_timer(neh, t, timer);
        struct uwb_rc *rc = neh->rc;
        unsigned long flags;
 
index f5e27247a38feac0a0294a57fa49ad0a260be637..fe25a8cc6fa10e5ce35ec218c29a16a8f5433658 100644 (file)
@@ -23,7 +23,7 @@
 
 #include "uwb-internal.h"
 
-static void uwb_rsv_timer(unsigned long arg);
+static void uwb_rsv_timer(struct timer_list *t);
 
 static const char *rsv_states[] = {
        [UWB_RSV_STATE_NONE]                 = "none            ",
@@ -198,9 +198,9 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
        dev_dbg(dev, "put stream %d\n", rsv->stream);
 }
 
-void uwb_rsv_backoff_win_timer(unsigned long arg)
+void uwb_rsv_backoff_win_timer(struct timer_list *t)
 {
-       struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
+       struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
        struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
        struct device *dev = &rc->uwb_dev.dev;
 
@@ -470,7 +470,7 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
        INIT_LIST_HEAD(&rsv->rc_node);
        INIT_LIST_HEAD(&rsv->pal_node);
        kref_init(&rsv->kref);
-       setup_timer(&rsv->timer, uwb_rsv_timer, (unsigned long)rsv);
+       timer_setup(&rsv->timer, uwb_rsv_timer, 0);
 
        rsv->rc = rc;
        INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
@@ -939,9 +939,9 @@ static void uwb_rsv_alien_bp_work(struct work_struct *work)
        mutex_unlock(&rc->rsvs_mutex);
 }
 
-static void uwb_rsv_timer(unsigned long arg)
+static void uwb_rsv_timer(struct timer_list *t)
 {
-       struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
+       struct uwb_rsv *rsv = from_timer(rsv, t, timer);
 
        queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
 }
@@ -987,8 +987,7 @@ void uwb_rsv_init(struct uwb_rc *rc)
        rc->bow.can_reserve_extra_mases = true;
        rc->bow.total_expired = 0;
        rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
-       setup_timer(&rc->bow.timer, uwb_rsv_backoff_win_timer,
-                       (unsigned long)&rc->bow);
+       timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0);
 
        bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
 }
index 353c0555a1f5dbd6d7e884ca24544ff4226550f4..91326ce093a78fd43572f6c6ad39581c24bb241c 100644 (file)
@@ -329,7 +329,7 @@ void uwb_rsv_put(struct uwb_rsv *rsv);
 bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
 void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
 int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
-void uwb_rsv_backoff_win_timer(unsigned long arg);
+void uwb_rsv_backoff_win_timer(struct timer_list *t);
 void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
 int uwb_rsv_status(struct uwb_rsv *rsv);
 int uwb_rsv_companion_status(struct uwb_rsv *rsv);
index 5e58f5ec0a28e449afa8813a652b1aa3469e0721..2f615b7f1c9f61e1e04a4857069f7a450d757130 100644 (file)
@@ -905,16 +905,6 @@ config FB_LEO
          This is the frame buffer device driver for the SBUS-based Sun ZX
          (leo) frame buffer cards.
 
-config FB_IGA
-       bool "IGA 168x display support"
-       depends on (FB = y) && SPARC32
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
-       help
-         This is the framebuffer device for the INTERGRAPHICS 1680 and
-         successor frame buffer cards.
-
 config FB_XVR500
        bool "Sun XVR-500 3DLABS Wildcat support"
        depends on (FB = y) && PCI && SPARC64
index 8895536a20d648723197affff38d1bbf8fc140a8..115961e0721b8ba1aaab4dcfb3b4032a50760637 100644 (file)
@@ -65,7 +65,6 @@ obj-$(CONFIG_FB_HGA)              += hgafb.o
 obj-$(CONFIG_FB_XVR500)           += sunxvr500.o
 obj-$(CONFIG_FB_XVR2500)          += sunxvr2500.o
 obj-$(CONFIG_FB_XVR1000)          += sunxvr1000.o
-obj-$(CONFIG_FB_IGA)              += igafb.o
 obj-$(CONFIG_FB_APOLLO)           += dnfb.o
 obj-$(CONFIG_FB_Q40)              += q40fb.o
 obj-$(CONFIG_FB_TGA)              += tgafb.o
index 3ec72f19114badf5cb26fe1cb530c04916b9d8c0..a9a8272f7a6eeda70a8e8d1a3c8b6381bbfaea41 100644 (file)
@@ -2272,10 +2272,10 @@ static void aty_bl_exit(struct backlight_device *bd)
 
 static void aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
 {
-       const int ragepro_tbl[] = {
+       static const int ragepro_tbl[] = {
                44, 50, 55, 66, 75, 80, 100
        };
-       const int ragexl_tbl[] = {
+       static const int ragexl_tbl[] = {
                50, 66, 75, 83, 90, 95, 100, 105,
                110, 115, 120, 125, 133, 143, 166
        };
index 1e2ec360f8c16da91b8ee9e921452ef4bda77834..4d77daeecf99b4d45b7876921379b8e1c95ec71d 100644 (file)
@@ -1454,9 +1454,9 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
 /*
  * Timer function for delayed LVDS panel power up/down
  */
-static void radeon_lvds_timer_func(unsigned long data)
+static void radeon_lvds_timer_func(struct timer_list *t)
 {
-       struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
+       struct radeonfb_info *rinfo = from_timer(rinfo, t, lvds_timer);
 
        radeon_engine_idle();
 
@@ -1534,7 +1534,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
 static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs *regs,
                                 unsigned long freq)
 {
-       const struct {
+       static const struct {
                int divider;
                int bitvalue;
        } *post_div,
@@ -2291,9 +2291,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
        rinfo->pdev = pdev;
        
        spin_lock_init(&rinfo->reg_lock);
-       init_timer(&rinfo->lvds_timer);
-       rinfo->lvds_timer.function = radeon_lvds_timer_func;
-       rinfo->lvds_timer.data = (unsigned long)rinfo;
+       timer_setup(&rinfo->lvds_timer, radeon_lvds_timer_func, 0);
 
        c1 = ent->device >> 8;
        c2 = ent->device & 0xff;
index f7c253dd5899f1e887abe7c768b1681a51b4162d..7137c12cbcee30ce60bbdcc27c62fdaf0c9cfbdc 100644 (file)
@@ -1208,9 +1208,11 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
        case 1:
                if (mc & 0x4)
                        break;
+               /* fall through */
        case 2:
                dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP;
                dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET;
+               /* fall through */
        case 0:
                dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP;
                dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET;
@@ -1219,6 +1221,7 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
        case 1:
                if (!(mc & 0x4))
                        break;
+               /* fall through */
        case 2:
                dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP;
                dll_reset_mask |= MDLL_R300_RDCK__MRDCKD_RESET;
index 5f04b4096c428883be0b4032fb9b055916cda5f5..87d5a62bf6ca446fb8141b602c38839363dc120e 100644 (file)
@@ -1518,7 +1518,7 @@ static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id)
 static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
 {
        struct fb_info *fbi = fbdev->fb_info;
-       int bpp;
+       int bpp, ret;
 
        fbi->fbops = &au1200fb_fb_ops;
 
@@ -1546,15 +1546,14 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
        }
 
        fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL);
-       if (!fbi->pseudo_palette) {
+       if (!fbi->pseudo_palette)
                return -ENOMEM;
-       }
 
-       if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
+       ret = fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0);
+       if (ret < 0) {
                print_err("Fail to allocate colormap (%d entries)",
-                          AU1200_LCD_NBR_PALETTE_ENTRIES);
-               kfree(fbi->pseudo_palette);
-               return -EFAULT;
+                         AU1200_LCD_NBR_PALETTE_ENTRIES);
+               return ret;
        }
 
        strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
@@ -1668,10 +1667,6 @@ static int au1200fb_drv_probe(struct platform_device *dev)
        printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
        printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
 
-       /* shut gcc up */
-       ret = 0;
-       fbdev = NULL;
-
        for (plane = 0; plane < device_count; ++plane) {
                bpp = winbpp(win->w[plane].mode_winctrl1);
                if (win->w[plane].xres == 0)
@@ -1681,8 +1676,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
 
                fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
                                        &dev->dev);
-               if (!fbi)
+               if (!fbi) {
+                       ret = -ENOMEM;
                        goto failed;
+               }
 
                _au1200fb_infos[plane] = fbi;
                fbdev = fbi->par;
@@ -1701,7 +1698,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
                if (!fbdev->fb_mem) {
                        print_err("fail to allocate frambuffer (size: %dK))",
                                  fbdev->fb_len / 1024);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto failed;
                }
 
                /*
@@ -1718,7 +1716,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
                print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
 
                /* Init FB data */
-               if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
+               ret = au1200fb_init_fbinfo(fbdev);
+               if (ret < 0)
                        goto failed;
 
                /* Register new framebuffer */
@@ -1758,21 +1757,26 @@ static int au1200fb_drv_probe(struct platform_device *dev)
        return 0;
 
 failed:
-       /* NOTE: This only does the current plane/window that failed; others are still active */
-       if (fbi) {
+       for (plane = 0; plane < device_count; ++plane) {
+               fbi = _au1200fb_infos[plane];
+               if (!fbi)
+                       break;
+
+               /* Clean up all probe data */
+               unregister_framebuffer(fbi);
                if (fbi->cmap.len != 0)
                        fb_dealloc_cmap(&fbi->cmap);
                kfree(fbi->pseudo_palette);
+
+               framebuffer_release(fbi);
+               _au1200fb_infos[plane] = NULL;
        }
-       if (plane == 0)
-               free_irq(AU1200_LCD_INT, (void*)dev);
        return ret;
 }
 
 static int au1200fb_drv_remove(struct platform_device *dev)
 {
        struct au1200fb_platdata *pd = platform_get_drvdata(dev);
-       struct au1200fb_device *fbdev;
        struct fb_info *fbi;
        int plane;
 
@@ -1781,7 +1785,6 @@ static int au1200fb_drv_remove(struct platform_device *dev)
 
        for (plane = 0; plane < device_count; ++plane)  {
                fbi = _au1200fb_infos[plane];
-               fbdev = fbi->par;
 
                /* Clean up all probe data */
                unregister_framebuffer(fbi);
index d992aa5eb3f0dc6557b23b4ec3e4868301c5ede8..b3be06dd290882e5c7d75db66977e73f6dcae128 100644 (file)
@@ -1477,10 +1477,12 @@ static void init_vgachip(struct fb_info *info)
                mdelay(100);
                /* mode */
                vga_wgfx(cinfo->regbase, CL_GR31, 0x00);
-       case BT_GD5480:  /* fall through */
+               /* fall through */
+       case BT_GD5480:
                /* from Klaus' NetBSD driver: */
                vga_wgfx(cinfo->regbase, CL_GR2F, 0x00);
-       case BT_ALPINE:  /* fall through */
+               /* fall through */
+       case BT_ALPINE:
                /* put blitter into 542x compat */
                vga_wgfx(cinfo->regbase, CL_GR33, 0x00);
                break;
index 6026c60fc1007e007ec568d23ad26fa3910795e6..261522fabdac89ae2644089aba612dee880b4d24 100644 (file)
@@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
        {{ 1, 2}},      /* 1152x870, 75Hz */
        {{ 0, 1}},      /* 1280x960, 75Hz */
        {{ 0, 1}},      /* 1280x1024, 75Hz */
+       {{ 1, 2}},      /* 1152x768, 60Hz */
+       {{ 0, 1}},      /* 1600x1024, 60Hz */
 };
 
index 04612f938bab1fc3eecf457d070fe501b8a1a3de..929ca472c5242ef50bd9653cffe315d72daac2aa 100644 (file)
@@ -395,10 +395,10 @@ static void fb_flashcursor(struct work_struct *work)
        console_unlock();
 }
 
-static void cursor_timer_handler(unsigned long dev_addr)
+static void cursor_timer_handler(struct timer_list *t)
 {
-       struct fb_info *info = (struct fb_info *) dev_addr;
-       struct fbcon_ops *ops = info->fbcon_par;
+       struct fbcon_ops *ops = from_timer(ops, t, cursor_timer);
+       struct fb_info *info = ops->info;
 
        queue_work(system_power_efficient_wq, &info->queue);
        mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
@@ -414,8 +414,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
                if (!info->queue.func)
                        INIT_WORK(&info->queue, fb_flashcursor);
 
-               setup_timer(&ops->cursor_timer, cursor_timer_handler,
-                           (unsigned long) info);
+               timer_setup(&ops->cursor_timer, cursor_timer_handler, 0);
                mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
                ops->flags |= FBCON_FLAGS_CURSOR_TIMER;
        }
@@ -714,6 +713,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
 
        if (!err) {
                ops->cur_blink_jiffies = HZ / 5;
+               ops->info = info;
                info->fbcon_par = ops;
 
                if (vc)
@@ -962,6 +962,7 @@ static const char *fbcon_startup(void)
        ops->graphics = 1;
        ops->cur_rotate = -1;
        ops->cur_blink_jiffies = HZ / 5;
+       ops->info = info;
        info->fbcon_par = ops;
        if (initial_rotation != -1)
                p->con_rotate = initial_rotation;
index 18f3ac14423706adc006f37bee55b1241627f99d..9f7744fbc962dc1c19f6b2f4b9d518126fedf0cc 100644 (file)
@@ -69,6 +69,7 @@ struct fbcon_ops {
        struct timer_list cursor_timer; /* Cursor timer */
        struct fb_cursor cursor_state;
        struct display *p;
+       struct fb_info *info;
         int    currcon;                        /* Current VC. */
        int    cur_blink_jiffies;
        int    cursor_flash;
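
The fbcon changes above are one instance of the kernel-wide timer API conversion carried by this merge: the callback now receives a struct timer_list *, the owning structure is recovered with from_timer(), and any extra context (here ops->info) lives in that structure instead of the old unsigned long data cookie. A minimal sketch of the pattern with illustrative names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo {
	struct timer_list timer;
	void *payload;		/* replaces the old timer->data cookie */
};

static void demo_timeout(struct timer_list *t)
{
	struct demo *d = from_timer(d, t, timer);

	/* use d->payload here; no cast from an unsigned long argument */
	mod_timer(&d->timer, jiffies + HZ);
}

static void demo_start(struct demo *d)
{
	timer_setup(&d->timer, demo_timeout, 0);
	mod_timer(&d->timer, jiffies + HZ);
}
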
index 7b1492d34e989ab92a1a4819192afc00294af4d7..5505fa00c6348a26597aaa6f65a90092aff5c14c 100644 (file)
@@ -115,7 +115,7 @@ static struct fb_ops dn_fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-struct fb_var_screeninfo dnfb_var = {
+static const struct fb_var_screeninfo dnfb_var = {
        .xres           = 1280,
        .yres           = 1024,
        .xres_virtual   = 2048,
@@ -242,16 +242,13 @@ static int dnfb_probe(struct platform_device *dev)
        info->screen_base = (u_char *) info->fix.smem_start;
 
        err = fb_alloc_cmap(&info->cmap, 2, 0);
-       if (err < 0) {
-               framebuffer_release(info);
-               return err;
-       }
+       if (err < 0)
+               goto release_framebuffer;
 
        err = register_framebuffer(info);
        if (err < 0) {
                fb_dealloc_cmap(&info->cmap);
-               framebuffer_release(info);
-               return err;
+               goto release_framebuffer;
        }
        platform_set_drvdata(dev, info);
 
@@ -265,6 +262,10 @@ static int dnfb_probe(struct platform_device *dev)
 
        printk("apollo frame buffer alive and kicking !\n");
        return err;
+
+release_framebuffer:
+       framebuffer_release(info);
+       return err;
 }
 
 static struct platform_driver dnfb_driver = {
index 7f6c9e6cfc6c99d8d9912db5d2f78242923f51c5..3b70044773b67566b6c7ebf516bc23dde166f95b 100644 (file)
@@ -304,12 +304,18 @@ static int goldfish_fb_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id goldfish_fb_of_match[] = {
+       { .compatible = "google,goldfish-fb", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
 
 static struct platform_driver goldfish_fb_driver = {
        .probe          = goldfish_fb_probe,
        .remove         = goldfish_fb_remove,
        .driver = {
-               .name = "goldfish_fb"
+               .name = "goldfish_fb",
+               .of_match_table = goldfish_fb_of_match,
        }
 };
 
diff --git a/drivers/video/fbdev/igafb.c b/drivers/video/fbdev/igafb.c
deleted file mode 100644 (file)
index 486f188..0000000
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- *  linux/drivers/video/igafb.c -- Frame buffer device for IGA 1682
- *
- *      Copyright (C) 1998  Vladimir Roganov and Gleb Raiko
- *
- *  This driver is partly based on the Frame buffer device for ATI Mach64
- *  and partially on VESA-related code.
- *
- *      Copyright (C) 1997-1998  Geert Uytterhoeven
- *      Copyright (C) 1998  Bernd Harries
- *      Copyright (C) 1998  Eddie C. Dost  (ecd@skynet.be)
- *
- *  This file is subject to the terms and conditions of the GNU General Public
- *  License. See the file COPYING in the main directory of this archive for
- *  more details.
- */
-
-/******************************************************************************
-
-  TODO:
-       Despite of IGA Card has advanced graphic acceleration, 
-       initial version is almost dummy and does not support it.
-       Support for video modes and acceleration must be added
-       together with accelerated X-Windows driver implementation.
-
-       Most important thing at this moment is that we have working
-       JavaEngine1  console & X  with new console interface.
-
-******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/nvram.h>
-
-#include <asm/io.h>
-
-#ifdef CONFIG_SPARC
-#include <asm/prom.h>
-#include <asm/pcic.h>
-#endif
-
-#include <video/iga.h>
-
-struct pci_mmap_map {
-    unsigned long voff;
-    unsigned long poff;
-    unsigned long size;
-    unsigned long prot_flag;
-    unsigned long prot_mask;
-};
-
-struct iga_par {
-       struct pci_mmap_map *mmap_map;
-       unsigned long frame_buffer_phys;
-       unsigned long io_base;
-};
-
-struct fb_info fb_info;
-
-struct fb_fix_screeninfo igafb_fix __initdata = {
-        .id            = "IGA 1682",
-       .type           = FB_TYPE_PACKED_PIXELS,
-       .mmio_len       = 1000
-};
-
-struct fb_var_screeninfo default_var = {
-       /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
-       .xres           = 640,
-       .yres           = 480,
-       .xres_virtual   = 640,
-       .yres_virtual   = 480,
-       .bits_per_pixel = 8,
-       .red            = {0, 8, 0 },
-       .green          = {0, 8, 0 },
-       .blue           = {0, 8, 0 },
-       .height         = -1,
-       .width          = -1,
-       .accel_flags    = FB_ACCEL_NONE,
-       .pixclock       = 39722,
-       .left_margin    = 48,
-       .right_margin   = 16,
-       .upper_margin   = 33,
-       .lower_margin   = 10,
-       .hsync_len      = 96,
-       .vsync_len      = 2,
-       .vmode          = FB_VMODE_NONINTERLACED
-};
-
-#ifdef CONFIG_SPARC
-struct fb_var_screeninfo default_var_1024x768 __initdata = {
-       /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */
-       .xres           = 1024,
-       .yres           = 768,
-       .xres_virtual   = 1024,
-       .yres_virtual   = 768,
-       .bits_per_pixel = 8,
-       .red            = {0, 8, 0 },
-       .green          = {0, 8, 0 },
-       .blue           = {0, 8, 0 },
-       .height         = -1,
-       .width          = -1,
-       .accel_flags    = FB_ACCEL_NONE,
-       .pixclock       = 12699,
-       .left_margin    = 176,
-       .right_margin   = 16,
-       .upper_margin   = 28,
-       .lower_margin   = 1,
-       .hsync_len      = 96,
-       .vsync_len      = 3,
-       .vmode          = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-struct fb_var_screeninfo default_var_1152x900 __initdata = {
-       /* 1152x900, 76 Hz, Non-Interlaced (110.0 MHz dotclock) */
-       .xres           = 1152,
-       .yres           = 900,
-       .xres_virtual   = 1152,
-       .yres_virtual   = 900,
-       .bits_per_pixel = 8,
-       .red            = { 0, 8, 0 },
-       .green          = { 0, 8, 0 },
-       .blue           = { 0, 8, 0 },
-       .height         = -1,
-       .width          = -1,
-       .accel_flags    = FB_ACCEL_NONE,
-       .pixclock       = 9091,
-       .left_margin    = 234,
-       .right_margin   = 24,
-       .upper_margin   = 34,
-       .lower_margin   = 3,
-       .hsync_len      = 100,
-       .vsync_len      = 3,
-       .vmode          = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-struct fb_var_screeninfo default_var_1280x1024 __initdata = {
-       /* 1280x1024, 75 Hz, Non-Interlaced (135.00 MHz dotclock) */
-       .xres           = 1280,
-       .yres           = 1024,
-       .xres_virtual   = 1280,
-       .yres_virtual   = 1024,
-       .bits_per_pixel = 8,
-       .red            = {0, 8, 0 }, 
-       .green          = {0, 8, 0 },
-       .blue           = {0, 8, 0 },
-       .height         = -1,
-       .width          = -1,
-       .accel_flags    = 0,
-       .pixclock       = 7408,
-       .left_margin    = 248,
-       .right_margin   = 16,
-       .upper_margin   = 38,
-       .lower_margin   = 1,
-       .hsync_len      = 144,
-       .vsync_len      = 3,
-       .vmode          = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-/*
- *   Memory-mapped I/O functions for Sparc PCI
- *
- * On sparc we happen to access I/O with memory mapped functions too.
- */ 
-#define pci_inb(par, reg)        readb(par->io_base+(reg))
-#define pci_outb(par, val, reg)  writeb(val, par->io_base+(reg))
-
-static inline unsigned int iga_inb(struct iga_par *par, unsigned int reg,
-                                  unsigned int idx)
-{
-        pci_outb(par, idx, reg);
-        return pci_inb(par, reg + 1);
-}
-
-static inline void iga_outb(struct iga_par *par, unsigned char val,
-                           unsigned int reg, unsigned int idx )
-{
-        pci_outb(par, idx, reg);
-        pci_outb(par, val, reg+1);
-}
-
-#endif /* CONFIG_SPARC */
-
-/*
- *  Very important functionality for the JavaEngine1 computer:
- *  make screen border black (usign special IGA registers) 
- */
-static void iga_blank_border(struct iga_par *par)
-{
-        int i;
-#if 0
-       /*
-        * PROM does this for us, so keep this code as a reminder
-        * about required read from 0x3DA and writing of 0x20 in the end.
-        */
-       (void) pci_inb(par, 0x3DA);             /* required for every access */
-       pci_outb(par, IGA_IDX_VGA_OVERSCAN, IGA_ATTR_CTL);
-       (void) pci_inb(par, IGA_ATTR_CTL+1);
-       pci_outb(par, 0x38, IGA_ATTR_CTL);
-       pci_outb(par, 0x20, IGA_ATTR_CTL);      /* re-enable visual */
-#endif
-       /*
-        * This does not work as it was designed because the overscan
-        * color is looked up in the palette. Therefore, under X11
-        * overscan changes color.
-        */
-       for (i=0; i < 3; i++)
-               iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i);
-}
-
-#ifdef CONFIG_SPARC
-static int igafb_mmap(struct fb_info *info,
-                     struct vm_area_struct *vma)
-{
-       struct iga_par *par = (struct iga_par *)info->par;
-       unsigned int size, page, map_size = 0;
-       unsigned long map_offset = 0;
-       int i;
-
-       if (!par->mmap_map)
-               return -ENXIO;
-
-       size = vma->vm_end - vma->vm_start;
-
-       /* Each page, see which map applies */
-       for (page = 0; page < size; ) {
-               map_size = 0;
-               for (i = 0; par->mmap_map[i].size; i++) {
-                       unsigned long start = par->mmap_map[i].voff;
-                       unsigned long end = start + par->mmap_map[i].size;
-                       unsigned long offset = (vma->vm_pgoff << PAGE_SHIFT) + page;
-
-                       if (start > offset)
-                               continue;
-                       if (offset >= end)
-                               continue;
-
-                       map_size = par->mmap_map[i].size - (offset - start);
-                       map_offset = par->mmap_map[i].poff + (offset - start);
-                       break;
-               }
-               if (!map_size) {
-                       page += PAGE_SIZE;
-                       continue;
-               }
-               if (page + map_size > size)
-                       map_size = size - page;
-
-               pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask);
-               pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;
-
-               if (remap_pfn_range(vma, vma->vm_start + page,
-                       map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot))
-                       return -EAGAIN;
-
-               page += map_size;
-       }
-
-       if (!map_size)
-               return -EINVAL;
-
-       vma->vm_flags |= VM_IO;
-       return 0;
-}
-#endif /* CONFIG_SPARC */
-
-static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green,
-                           unsigned blue, unsigned transp,
-                           struct fb_info *info)
-{
-        /*
-         *  Set a single color register. The values supplied are
-         *  already rounded down to the hardware's capabilities
-         *  (according to the entries in the `var' structure). Return
-         *  != 0 for invalid regno.
-         */
-       struct iga_par *par = (struct iga_par *)info->par;
-
-        if (regno >= info->cmap.len)
-                return 1;
-
-       pci_outb(par, regno, DAC_W_INDEX);
-       pci_outb(par, red,   DAC_DATA);
-       pci_outb(par, green, DAC_DATA);
-       pci_outb(par, blue,  DAC_DATA);
-
-       if (regno < 16) {
-               switch (info->var.bits_per_pixel) {
-               case 16:
-                       ((u16*)(info->pseudo_palette))[regno] = 
-                               (regno << 10) | (regno << 5) | regno;
-                       break;
-               case 24:
-                       ((u32*)(info->pseudo_palette))[regno] = 
-                               (regno << 16) | (regno << 8) | regno;
-               break;
-               case 32:
-                       { int i;
-                       i = (regno << 8) | regno;
-                       ((u32*)(info->pseudo_palette))[regno] = (i << 16) | i;
-                       }
-                       break;
-               }
-       }
-       return 0;
-}
-
-/*
- * Framebuffer option structure
- */
-static struct fb_ops igafb_ops = {
-       .owner          = THIS_MODULE,
-       .fb_setcolreg   = igafb_setcolreg,
-       .fb_fillrect    = cfb_fillrect,
-       .fb_copyarea    = cfb_copyarea,
-       .fb_imageblit   = cfb_imageblit,
-#ifdef CONFIG_SPARC
-       .fb_mmap        = igafb_mmap,
-#endif
-};
-
-static int __init iga_init(struct fb_info *info, struct iga_par *par)
-{
-        char vramsz = iga_inb(par, IGA_EXT_CNTRL, IGA_IDX_EXT_BUS_CNTL) 
-                                                        & MEM_SIZE_ALIAS;
-       int video_cmap_len;
-
-        switch (vramsz) {
-        case MEM_SIZE_1M:
-                info->fix.smem_len = 0x100000;
-                break;
-        case MEM_SIZE_2M:
-                info->fix.smem_len = 0x200000;
-                break;
-        case MEM_SIZE_4M:
-        case MEM_SIZE_RESERVED:
-                info->fix.smem_len = 0x400000;
-                break;
-        }
-
-        if (info->var.bits_per_pixel > 8) 
-                video_cmap_len = 16;
-        else 
-                video_cmap_len = 256;
-
-       info->fbops = &igafb_ops;
-       info->flags = FBINFO_DEFAULT;
-
-       fb_alloc_cmap(&info->cmap, video_cmap_len, 0);
-
-       if (register_framebuffer(info) < 0)
-               return 0;
-
-       fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n",
-               info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20);
-
-       iga_blank_border(par); 
-       return 1;
-}
-
-static int __init igafb_init(void)
-{
-        struct fb_info *info;
-        struct pci_dev *pdev;
-        struct iga_par *par;
-       unsigned long addr;
-       int size, iga2000 = 0;
-
-       if (fb_get_options("igafb", NULL))
-               return -ENODEV;
-
-        pdev = pci_get_device(PCI_VENDOR_ID_INTERG,
-                               PCI_DEVICE_ID_INTERG_1682, 0);
-       if (pdev == NULL) {
-               /*
-                * XXX We tried to use cyber2000fb.c for IGS 2000.
-                * But it does not initialize the chip in JavaStation-E, alas.
-                */
-               pdev = pci_get_device(PCI_VENDOR_ID_INTERG, 0x2000, 0);
-               if(pdev == NULL) {
-                       return -ENXIO;
-               }
-               iga2000 = 1;
-       }
-       /* We leak a reference here but as it cannot be unloaded this is
-          fine. If you write unload code remember to free it in unload */
-       
-       size = sizeof(struct iga_par) + sizeof(u32)*16;
-
-       info = framebuffer_alloc(size, &pdev->dev);
-        if (!info) {
-                printk("igafb_init: can't alloc fb_info\n");
-                pci_dev_put(pdev);
-                return -ENOMEM;
-        }
-
-       par = info->par;
-
-       if ((addr = pdev->resource[0].start) == 0) {
-                printk("igafb_init: no memory start\n");
-               kfree(info);
-               pci_dev_put(pdev);
-               return -ENXIO;
-       }
-
-       if ((info->screen_base = ioremap(addr, 1024*1024*2)) == 0) {
-                printk("igafb_init: can't remap %lx[2M]\n", addr);
-               kfree(info);
-               pci_dev_put(pdev);
-               return -ENXIO;
-       }
-
-       par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK;
-
-#ifdef CONFIG_SPARC
-       /*
-        * The following is sparc specific and this is why:
-        *
-        * IGS2000 has its I/O memory mapped and we want
-        * to generate memory cycles on PCI, e.g. do ioremap(),
-        * then readb/writeb() as in Documentation/io-mapping.txt.
-        *
-        * IGS1682 is more traditional, it responds to PCI I/O
-        * cycles, so we want to access it with inb()/outb().
-        *
-        * On sparc, PCIC converts CPU memory access within
-        * phys window 0x3000xxxx into PCI I/O cycles. Therefore
-        * we may use readb/writeb to access them with IGS1682.
-        *
-        * We do not take io_base_phys from resource[n].start
-        * on IGS1682 because that chip is BROKEN. It does not
-        * have a base register for I/O. We just "know" what its
-        * I/O addresses are.
-        */
-       if (iga2000) {
-               igafb_fix.mmio_start = par->frame_buffer_phys | 0x00800000;
-       } else {
-               igafb_fix.mmio_start = 0x30000000;      /* XXX */
-       }
-       if ((par->io_base = (int) ioremap(igafb_fix.mmio_start, igafb_fix.smem_len)) == 0) {
-                printk("igafb_init: can't remap %lx[4K]\n", igafb_fix.mmio_start);
-               iounmap((void *)info->screen_base);
-               kfree(info);
-               pci_dev_put(pdev);
-               return -ENXIO;
-       }
-
-       /*
-        * Figure mmap addresses from PCI config space.
-        * We need two regions: for video memory and for I/O ports.
-        * Later one can add region for video coprocessor registers.
-        * However, mmap routine loops until size != 0, so we put
-        * one additional region with size == 0. 
-        */
-
-       par->mmap_map = kzalloc(4 * sizeof(*par->mmap_map), GFP_ATOMIC);
-       if (!par->mmap_map) {
-               printk("igafb_init: can't alloc mmap_map\n");
-               iounmap((void *)par->io_base);
-               iounmap(info->screen_base);
-               kfree(info);
-               pci_dev_put(pdev);
-               return -ENOMEM;
-       }
-
-       /*
-        * Set default vmode and cmode from PROM properties.
-        */
-       {
-               struct device_node *dp = pci_device_to_OF_node(pdev);
-                int node = dp->node;
-                int width = prom_getintdefault(node, "width", 1024);
-                int height = prom_getintdefault(node, "height", 768);
-                int depth = prom_getintdefault(node, "depth", 8);
-                switch (width) {
-                    case 1024:
-                        if (height == 768)
-                            default_var = default_var_1024x768;
-                        break;
-                    case 1152:
-                        if (height == 900)
-                            default_var = default_var_1152x900;
-                        break;
-                    case 1280:
-                        if (height == 1024)
-                            default_var = default_var_1280x1024;
-                        break;
-                    default:
-                        break;
-                }
-
-                switch (depth) {
-                    case 8:
-                        default_var.bits_per_pixel = 8;
-                        break;
-                    case 16:
-                        default_var.bits_per_pixel = 16;
-                        break;
-                    case 24:
-                        default_var.bits_per_pixel = 24;
-                        break;
-                    case 32:
-                        default_var.bits_per_pixel = 32;
-                        break;
-                    default:
-                        break;
-                }
-            }
-
-#endif
-       igafb_fix.smem_start = (unsigned long) info->screen_base;
-       igafb_fix.line_length = default_var.xres*(default_var.bits_per_pixel/8);
-       igafb_fix.visual = default_var.bits_per_pixel <= 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-
-       info->var = default_var;
-       info->fix = igafb_fix;
-       info->pseudo_palette = (void *)(par + 1);
-
-       if (!iga_init(info, par)) {
-               iounmap((void *)par->io_base);
-               iounmap(info->screen_base);
-               kfree(par->mmap_map);
-               kfree(info);
-               return -ENODEV;
-        }
-
-#ifdef CONFIG_SPARC
-           /*
-            * Add /dev/fb mmap values.
-            */
-           
-           /* First region is for video memory */
-           par->mmap_map[0].voff = 0x0;  
-           par->mmap_map[0].poff = par->frame_buffer_phys & PAGE_MASK;
-           par->mmap_map[0].size = info->fix.smem_len & PAGE_MASK;
-           par->mmap_map[0].prot_mask = SRMMU_CACHE;
-           par->mmap_map[0].prot_flag = SRMMU_WRITE;
-
-           /* Second region is for I/O ports */
-           par->mmap_map[1].voff = par->frame_buffer_phys & PAGE_MASK;
-           par->mmap_map[1].poff = info->fix.smem_start & PAGE_MASK;
-           par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */
-           par->mmap_map[1].prot_mask = SRMMU_CACHE;
-           par->mmap_map[1].prot_flag = SRMMU_WRITE;
-#endif /* CONFIG_SPARC */
-
-       return 0;
-}
-
-static int __init igafb_setup(char *options)
-{
-    char *this_opt;
-
-    if (!options || !*options)
-        return 0;
-
-    while ((this_opt = strsep(&options, ",")) != NULL) {
-    }
-    return 0;
-}
-
-module_init(igafb_init);
-MODULE_LICENSE("GPL");
-static struct pci_device_id igafb_pci_tbl[] = {
-       { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-       { }
-};
-
-MODULE_DEVICE_TABLE(pci, igafb_pci_tbl);
index d31ed4e2c46f1020ab84e8899337c3bd82fabd18..83fec573cceb78f6fa9b5a13e1787bdd2fcb5e41 100644 (file)
@@ -937,15 +937,11 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
 {
        u32 m1, m2, n, p1, p2, n1, testm;
        u32 f_vco, p, p_best = 0, m, f_out = 0;
-       u32 err_max, err_target, err_best = 10000000;
-       u32 n_best = 0, m_best = 0, f_best, f_err;
+       u32 err_best = 10000000;
+       u32 n_best = 0, m_best = 0, f_err;
        u32 p_min, p_max, p_inc, div_max;
        struct pll_min_max *pll = &plls[index];
 
-       /* Accept 0.5% difference, but aim for 0.1% */
-       err_max = 5 * clock / 1000;
-       err_target = clock / 1000;
-
        DBG_MSG("Clock is %d\n", clock);
 
        div_max = pll->max_vco / clock;
@@ -992,7 +988,6 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
                                        m_best = testm;
                                        n_best = n;
                                        p_best = p;
-                                       f_best = f_out;
                                        err_best = f_err;
                                }
                        }
index b9b284d79631d35b11f65536d516314b6157b956..838869c6490c2e4dcf87a4b35d5103d62976657b 100644 (file)
@@ -2056,7 +2056,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
 
        minfo = kzalloc(sizeof(*minfo), GFP_KERNEL);
        if (!minfo)
-               return -1;
+               return -ENOMEM;
 
        minfo->pcidev = pdev;
        minfo->dead = 0;
index 7846f0e8bbbb55f9ad691b733b6d2333452a672e..79b1dc7f042b220277a997169efdee2757784554 100644 (file)
 #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
 
 #define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT        (1 << 6)
-#define MXSFB_SYNC_DOTCLK_FALLING_ACT  (1 << 7) /* negtive edge sampling */
+#define MXSFB_SYNC_DOTCLK_FALLING_ACT  (1 << 7) /* negative edge sampling */
 
 enum mxsfb_devtype {
        MXSFB_V3,
@@ -788,7 +788,16 @@ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host,
 
        if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
                host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
-       if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+
+       /*
+        * The PIXDATA flags of the display_flags enum are controller
+        * centric, e.g. NEGEDGE means drive data on negative edge.
+        * However, the drivers flag is display centric: Sample the
+        * data on negative (falling) edge. Therefore, check for the
+        * POSEDGE flag:
+        * drive on positive edge => sample on negative edge
+        */
+       if (vm.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
                host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
 
 put_display_node:
index a4ee65b8f9187f8788aec8ae9a92e0169319c38c..6199d48061938c536e7f9803e4286d548e160f15 100644 (file)
@@ -474,7 +474,7 @@ static void auto_update_complete(void *data)
                          jiffies + HWA742_AUTO_UPDATE_TIME);
 }
 
-static void hwa742_update_window_auto(unsigned long arg)
+static void hwa742_update_window_auto(struct timer_list *unused)
 {
        LIST_HEAD(req_list);
        struct hwa742_request *last;
@@ -1002,9 +1002,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
        hwa742.auto_update_window.height = fbdev->panel->y_res;
        hwa742.auto_update_window.format = 0;
 
-       init_timer(&hwa742.auto_update_timer);
-       hwa742.auto_update_timer.function = hwa742_update_window_auto;
-       hwa742.auto_update_timer.data = 0;
+       timer_setup(&hwa742.auto_update_timer, hwa742_update_window_auto, 0);
 
        hwa742.prev_color_mode = -1;
        hwa742.prev_flags = 0;
index 30d49f3800b334b0a3f6b8ed29ea437d30cb632e..8e1d60d48dbb0edb507093581bd45833bd563d0c 100644 (file)
@@ -3988,7 +3988,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
 }
 
 #ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
 {
        DSSERR("TE not received for 250ms!\n");
 }
@@ -5298,9 +5298,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
                             dsi_framedone_timeout_work_callback);
 
 #ifdef DSI_CATCH_MISSING_TE
-       init_timer(&dsi->te_timer);
-       dsi->te_timer.function = dsi_te_timeout;
-       dsi->te_timer.data = 0;
+       timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
 #endif
 
        res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
index 1d7c012f09dbb70cb0c5d1aed909272c94c4b4b3..e08e5664e330f84adcb99afbfda605556611fe64 100644 (file)
@@ -1477,7 +1477,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
 static int omapfb_parse_vram_param(const char *param, int max_entries,
                unsigned long *sizes, unsigned long *paddrs)
 {
-       int fbnum;
+       unsigned int fbnum;
        unsigned long size;
        unsigned long paddr = 0;
        char *p, *start;
index 933619da1a94b94e97c5c5f81b082b2eab7385df..55fbb432c05352271380f25fd5da5c5502a7af51 100644 (file)
@@ -512,28 +512,26 @@ pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
 
 #ifdef PXA3XX_GCU_DEBUG_TIMER
 static struct timer_list pxa3xx_gcu_debug_timer;
+static struct pxa3xx_gcu_priv *debug_timer_priv;
 
-static void pxa3xx_gcu_debug_timedout(unsigned long ptr)
+static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
 {
-       struct pxa3xx_gcu_priv *priv = (struct pxa3xx_gcu_priv *) ptr;
+       struct pxa3xx_gcu_priv *priv = debug_timer_priv;
 
        QERROR("Timer DUMP");
 
-       /* init the timer structure */
-       init_timer(&pxa3xx_gcu_debug_timer);
-       pxa3xx_gcu_debug_timer.function = pxa3xx_gcu_debug_timedout;
-       pxa3xx_gcu_debug_timer.data = ptr;
-       pxa3xx_gcu_debug_timer.expires = jiffies + 5*HZ; /* one second */
-
-       add_timer(&pxa3xx_gcu_debug_timer);
+       mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
 }
 
-static void pxa3xx_gcu_init_debug_timer(void)
+static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
 {
-       pxa3xx_gcu_debug_timedout((unsigned long) &pxa3xx_gcu_debug_timer);
+       /* init the timer structure */
+       debug_timer_priv = priv;
+       timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
+       pxa3xx_gcu_debug_timedout(NULL);
 }
 #else
-static inline void pxa3xx_gcu_init_debug_timer(void) {}
+static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
 #endif
 
 static int
@@ -670,7 +668,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, priv);
        priv->resource_mem = r;
        pxa3xx_gcu_reset(priv);
-       pxa3xx_gcu_init_debug_timer();
+       pxa3xx_gcu_init_debug_timer(priv);
 
        dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
                        (void *) r->start, (void *) priv->shared_phys,
index fc2aaa5aca2347e705c6eb1623ec188b5262e498..15ae50063296ed823836066b7e3a4a26a2021d9b 100644 (file)
@@ -323,13 +323,11 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                 * according to the RGB bitfield information.
                 */
                if (regno < 16) {
-                       u32 *pal = fbi->fb.pseudo_palette;
-
                        val  = chan_to_field(red, &fbi->fb.var.red);
                        val |= chan_to_field(green, &fbi->fb.var.green);
                        val |= chan_to_field(blue, &fbi->fb.var.blue);
 
-                       pal[regno] = val;
+                       fbi->pseudo_palette[regno] = val;
                        ret = 0;
                }
                break;
@@ -1132,12 +1130,10 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
        struct sa1100fb_info *fbi;
        unsigned i;
 
-       fbi = kmalloc(sizeof(struct sa1100fb_info) + sizeof(u32) * 16,
-                     GFP_KERNEL);
+       fbi = devm_kzalloc(dev, sizeof(struct sa1100fb_info), GFP_KERNEL);
        if (!fbi)
                return NULL;
 
-       memset(fbi, 0, sizeof(struct sa1100fb_info));
        fbi->dev = dev;
 
        strcpy(fbi->fb.fix.id, SA1100_NAME);
@@ -1159,7 +1155,7 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
        fbi->fb.fbops           = &sa1100fb_ops;
        fbi->fb.flags           = FBINFO_DEFAULT;
        fbi->fb.monspecs        = monspecs;
-       fbi->fb.pseudo_palette  = (fbi + 1);
+       fbi->fb.pseudo_palette  = fbi->pseudo_palette;
 
        fbi->rgb[RGB_4]         = &rgb_4;
        fbi->rgb[RGB_8]         = &rgb_8;
@@ -1218,48 +1214,42 @@ static int sa1100fb_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0 || !res)
+       if (irq < 0)
                return -EINVAL;
 
-       if (!request_mem_region(res->start, resource_size(res), "LCD"))
-               return -EBUSY;
-
        fbi = sa1100fb_init_fbinfo(&pdev->dev);
-       ret = -ENOMEM;
        if (!fbi)
-               goto failed;
-
-       fbi->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(fbi->clk)) {
-               ret = PTR_ERR(fbi->clk);
-               fbi->clk = NULL;
-               goto failed;
-       }
+               return -ENOMEM;
 
-       fbi->base = ioremap(res->start, resource_size(res));
-       if (!fbi->base)
-               goto failed;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       fbi->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(fbi->base))
+               return PTR_ERR(fbi->base);
 
-       /* Initialize video memory */
-       ret = sa1100fb_map_video_memory(fbi);
-       if (ret)
-               goto failed;
+       fbi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(fbi->clk))
+               return PTR_ERR(fbi->clk);
 
-       ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi);
+       ret = devm_request_irq(&pdev->dev, irq, sa1100fb_handle_irq, 0,
+                              "LCD", fbi);
        if (ret) {
                dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
-               goto failed;
+               return ret;
        }
 
        if (machine_is_shannon()) {
-               ret = gpio_request_one(SHANNON_GPIO_DISP_EN,
+               ret = devm_gpio_request_one(&pdev->dev, SHANNON_GPIO_DISP_EN,
                        GPIOF_OUT_INIT_LOW, "display enable");
                if (ret)
-                       goto err_free_irq;
+                       return ret;
        }
 
+       /* Initialize video memory */
+       ret = sa1100fb_map_video_memory(fbi);
+       if (ret)
+               return ret;
+
        /*
         * This makes sure that our colour bitfield
         * descriptors are correctly initialised.
@@ -1269,8 +1259,11 @@ static int sa1100fb_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, fbi);
 
        ret = register_framebuffer(&fbi->fb);
-       if (ret < 0)
-               goto err_reg_fb;
+       if (ret < 0) {
+               dma_free_wc(fbi->dev, fbi->map_size, fbi->map_cpu,
+                           fbi->map_dma);
+               return ret;
+       }
 
 #ifdef CONFIG_CPU_FREQ
        fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
@@ -1281,20 +1274,6 @@ static int sa1100fb_probe(struct platform_device *pdev)
 
        /* This driver cannot be unloaded at the moment */
        return 0;
-
- err_reg_fb:
-       if (machine_is_shannon())
-               gpio_free(SHANNON_GPIO_DISP_EN);
- err_free_irq:
-       free_irq(irq, fbi);
- failed:
-       if (fbi)
-               iounmap(fbi->base);
-       if (fbi->clk)
-               clk_put(fbi->clk);
-       kfree(fbi);
-       release_mem_region(res->start, resource_size(res));
-       return ret;
 }
 
 static struct platform_driver sa1100fb_driver = {
index 0139d13377a5efaf5e6667c4db7ef8ce99531257..7a1a9ca33cec55d81d579c31e7b2a3320491a168 100644 (file)
@@ -69,6 +69,8 @@ struct sa1100fb_info {
 
        const struct sa1100fb_mach_info *inf;
        struct clk *clk;
+
+       u32 pseudo_palette[16];
 };
 
 #define TO_INF(ptr,member)     container_of(ptr,struct sa1100fb_info,member)
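
The sa1100fb rework above swaps hand-rolled resource handling for device-managed (devm_*) variants, so each early failure in probe can simply return, and the embedded pseudo_palette[16] removes the old trailing-allocation trick. A minimal sketch of a probe written in that style, with illustrative driver and handler names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *clk;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = devm_request_irq(&pdev->dev, irq, demo_irq, 0, "demo", NULL);
	if (ret)
		return ret;

	/* no unwind labels: devm resources are released automatically */
	return 0;
}
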
index 1ec9c3e0e1d85092f4e06b3a2db04d9836e36a1f..02ee752d5000567148cf24a174471b559cfc80b1 100644 (file)
@@ -6486,7 +6486,7 @@ SiS_SetTVSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 
   if(!(SiS_Pr->SiS_TVMode & TVSetPAL)) {
      if(SiS_Pr->SiS_TVMode & TVSetNTSC1024) {
-        const unsigned char specialtv[] = {
+        static const unsigned char specialtv[] = {
                0xa7,0x07,0xf2,0x6e,0x17,0x8b,0x73,0x53,
                0x13,0x40,0x34,0xf4,0x63,0xbb,0xcc,0x7a,
                0x58,0xe4,0x73,0xda,0x13
index e92303823a4b083987090920011c79bb7b45c001..ecdd054d89510d0d68281c37ac7eb9caa9bb2c8e 100644 (file)
@@ -1702,6 +1702,7 @@ static int        sisfb_ioctl(struct fb_info *info, unsigned int cmd,
                if(ivideo->warncount++ < 10)
                        printk(KERN_INFO
                                "sisfb: Deprecated ioctl call received - update your application!\n");
+               /* fall through */
           case SISFB_GET_INFO:  /* For communication with X driver */
                ivideo->sisfb_infoblock.sisfb_id         = SISFB_ID;
                ivideo->sisfb_infoblock.sisfb_version    = VER_MAJOR;
@@ -1755,6 +1756,7 @@ static int        sisfb_ioctl(struct fb_info *info, unsigned int cmd,
                if(ivideo->warncount++ < 10)
                        printk(KERN_INFO
                                "sisfb: Deprecated ioctl call received - update your application!\n");
+               /* fall through */
           case SISFB_GET_VBRSTATUS:
                if(sisfb_CheckVBRetrace(ivideo))
                        return put_user((u32)1, argp);
@@ -1765,6 +1767,7 @@ static int        sisfb_ioctl(struct fb_info *info, unsigned int cmd,
                if(ivideo->warncount++ < 10)
                        printk(KERN_INFO
                                "sisfb: Deprecated ioctl call received - update your application!\n");
+               /* fall through */
           case SISFB_GET_AUTOMAXIMIZE:
                if(ivideo->sisfb_max)
                        return put_user((u32)1, argp);
@@ -1775,6 +1778,7 @@ static int        sisfb_ioctl(struct fb_info *info, unsigned int cmd,
                if(ivideo->warncount++ < 10)
                        printk(KERN_INFO
                                "sisfb: Deprecated ioctl call received - update your application!\n");
+               /* fall through */
           case SISFB_SET_AUTOMAXIMIZE:
                if(get_user(gpu32, argp))
                        return -EFAULT;
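
The sisfb hunks above (and the sm501fb blank hunk below) only add /* fall through */ comments: the deprecated-ioctl cases intentionally continue into the cases beneath them, and the comment documents that while satisfying -Wimplicit-fallthrough. A minimal illustration, not taken from sisfb:

#include <linux/kernel.h>

static void demo_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case 0:	/* deprecated command */
		pr_info("deprecated ioctl - update your application\n");
		/* fall through */
	case 1:	/* current command */
		/* shared handling for both the old and the new command */
		break;
	}
}
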
index 076dd2711630e1f78c3bf19572915b0b2b9831ee..6f0a19501c6a8d959e1f4a994e705934808d4c77 100644 (file)
@@ -1008,6 +1008,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
        case FB_BLANK_POWERDOWN:
                ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE;
                sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0);
+               /* fall through */
 
        case FB_BLANK_NORMAL:
                ctrl |= SM501_DC_CRT_CONTROL_BLANK;
@@ -1889,6 +1890,9 @@ static void sm501_free_init_fb(struct sm501fb_info *info,
 {
        struct fb_info *fbi = info->fb[head];
 
+       if (!fbi)
+               return;
+
        fb_dealloc_cmap(&fbi->cmap);
 }
 
@@ -2076,8 +2080,10 @@ static int sm501fb_remove(struct platform_device *pdev)
        sm501_free_init_fb(info, HEAD_CRT);
        sm501_free_init_fb(info, HEAD_PANEL);
 
-       unregister_framebuffer(fbinfo_crt);
-       unregister_framebuffer(fbinfo_pnl);
+       if (fbinfo_crt)
+               unregister_framebuffer(fbinfo_crt);
+       if (fbinfo_pnl)
+               unregister_framebuffer(fbinfo_pnl);
 
        sm501fb_stop(info);
        kfree(info);
@@ -2094,8 +2100,12 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
                              enum sm501_controller head)
 {
        struct fb_info *fbi = info->fb[head];
-       struct sm501fb_par *par = fbi->par;
+       struct sm501fb_par *par;
+
+       if (!fbi)
+               return 0;
 
+       par = fbi->par;
        if (par->screen.size == 0)
                return 0;
 
@@ -2141,8 +2151,12 @@ static void sm501fb_resume_fb(struct sm501fb_info *info,
                              enum sm501_controller head)
 {
        struct fb_info *fbi = info->fb[head];
-       struct sm501fb_par *par = fbi->par;
+       struct sm501fb_par *par;
+
+       if (!fbi)
+               return;
 
+       par = fbi->par;
        if (par->screen.size == 0)
                return;
 
index ef08a104fb42c6dafe3c88d32ca18b7430ede759..d44f14242016e07682134c4f2bfa4832e6286462 100644 (file)
@@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
 
        for (i = 0; i < len; i++) {
                ret = usb_control_msg(dev->udev,
-                                   usb_rcvctrlpipe(dev->udev, 0), (0x02),
-                                   (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
-                                   HZ);
-               if (ret < 1) {
-                       pr_err("Read EDID byte %d failed err %x\n", i, ret);
+                                     usb_rcvctrlpipe(dev->udev, 0), 0x02,
+                                     (0x80 | (0x02 << 5)), i << 8, 0xA1,
+                                     rbuf, 2, USB_CTRL_GET_TIMEOUT);
+               if (ret < 2) {
+                       pr_err("Read EDID byte %d failed: %d\n", i, ret);
                        i--;
                        break;
                }
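
The EDID-read hunk above replaces the bare HZ timeout with USB_CTRL_GET_TIMEOUT and only accepts a read when both requested bytes arrived, treating ret < 2 as failure. A minimal sketch of that check; the request values 0x02 and 0xA1 come from the hunk, everything else is illustrative:

#include <linux/usb.h>

static int demo_read_edid_word(struct usb_device *udev, int i, u8 *buf)
{
	int ret;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x02,
			      USB_DIR_IN | USB_TYPE_VENDOR, i << 8, 0xA1,
			      buf, 2, USB_CTRL_GET_TIMEOUT);
	/* a short read is as bad as an error: both bytes must arrive */
	if (ret < 2)
		return ret < 0 ? ret : -EIO;
	return 0;
}
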
index 18e896eeca62352f3c5e2c73479aab506c064958..12f7ea62dddd85eee3bca19e5335e3981c23e0d7 100644 (file)
@@ -70,7 +70,7 @@ module_param(use_gpio, int, 0);
 MODULE_PARM_DESC(use_gpio,
                "Use the gpio watchdog (required by old cobalt boards).");
 
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
 static DEFINE_TIMER(timer, wdt_timer_ping);
 static unsigned long next_heartbeat;
 static unsigned long wdt_is_open;
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(nowayout,
  *     Whack the dog
  */
 
-static void wdt_timer_ping(unsigned long unused)
+static void wdt_timer_ping(struct timer_list *unused)
 {
        /* If we got a heartbeat pulse within the WDT_US_INTERVAL
         * we agree to ping the WDT
index 7e6acaf3ece495ac9056bcd74532c994feb8ba0f..88c05d0448b2f937494b1c1bd42be6fc3a998192 100644 (file)
@@ -120,9 +120,9 @@ static inline void at91_wdt_reset(struct at91wdt *wdt)
 /*
  * Timer tick
  */
-static void at91_ping(unsigned long data)
+static void at91_ping(struct timer_list *t)
 {
-       struct at91wdt *wdt = (struct at91wdt *)data;
+       struct at91wdt *wdt = from_timer(wdt, t, timer);
        if (time_before(jiffies, wdt->next_heartbeat) ||
            !watchdog_active(&wdt->wdd)) {
                at91_wdt_reset(wdt);
@@ -222,7 +222,7 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
                         "watchdog already configured differently (mr = %x expecting %x)\n",
                         tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
 
-       setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+       timer_setup(&wdt->timer, at91_ping, 0);
 
        /*
         * Use min_heartbeat the first time to avoid spurious watchdog reset:
index 236582809336bf7e50eb9269331ec457b41a9e9d..f41b756d6dd552722d0643ab9bd9336da97fd054 100644 (file)
@@ -106,9 +106,9 @@ static const struct watchdog_ops bcm47xx_wdt_hard_ops = {
        .restart        = bcm47xx_wdt_restart,
 };
 
-static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
+static void bcm47xx_wdt_soft_timer_tick(struct timer_list *t)
 {
-       struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
+       struct bcm47xx_wdt *wdt = from_timer(wdt, t, soft_timer);
        u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
 
        if (!atomic_dec_and_test(&wdt->soft_ticks)) {
@@ -133,7 +133,7 @@ static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd)
        struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
 
        bcm47xx_wdt_soft_keepalive(wdd);
-       bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);
+       bcm47xx_wdt_soft_timer_tick(&wdt->soft_timer);
 
        return 0;
 }
@@ -190,8 +190,7 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
 
        if (soft) {
                wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
-               setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
-                           (long unsigned int)wdt);
+               timer_setup(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick, 0);
        } else {
                wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
        }
index ab26fd90729ec664a4fb2caf8a79582ee0108769..8555afc70f9bf37552e8e1c76575edb7f97e93ad 100644 (file)
@@ -77,7 +77,7 @@ static void bcm63xx_wdt_isr(void *data)
        die(PFX " fire", regs);
 }
 
-static void bcm63xx_timer_tick(unsigned long unused)
+static void bcm63xx_timer_tick(struct timer_list *unused)
 {
        if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) {
                bcm63xx_wdt_hw_start();
@@ -240,7 +240,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
        int ret;
        struct resource *r;
 
-       setup_timer(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0L);
+       timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
index 6c3f78e45c265da4b6f9cfefdb8af16a96b30874..6cfb102c397c9454697d7e6b301b779e4afdcd37 100644 (file)
@@ -69,7 +69,7 @@ static struct {
 
 /* generic helper functions */
 
-static void cpu5wdt_trigger(unsigned long unused)
+static void cpu5wdt_trigger(struct timer_list *unused)
 {
        if (verbose > 2)
                pr_debug("trigger at %i ticks\n", ticks);
@@ -224,7 +224,7 @@ static int cpu5wdt_init(void)
 
        init_completion(&cpu5wdt_device.stop);
        cpu5wdt_device.queue = 0;
-       setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
+       timer_setup(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
        cpu5wdt_device.default_ticks = ticks;
 
        if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
index 8a616a57bb90441cc946c2d4d25c71dd5ae8f544..88d823d87a4b3895e4a139f9579ae6b08359f8dd 100644 (file)
@@ -121,7 +121,7 @@ module_param(action, int, 0);
 MODULE_PARM_DESC(action, "after watchdog resets, generate: "
                                "0 = RESET(*)  1 = SMI  2 = NMI  3 = SCI");
 
-static void zf_ping(unsigned long data);
+static void zf_ping(struct timer_list *unused);
 
 static int zf_action = GEN_RESET;
 static unsigned long zf_is_open;
@@ -237,7 +237,7 @@ static void zf_timer_on(void)
 }
 
 
-static void zf_ping(unsigned long data)
+static void zf_ping(struct timer_list *unused)
 {
        unsigned int ctrl_reg = 0;
        unsigned long flags;
index c9e38096ea91fff0bcfed82f8d8058ff0bb7be17..3cc07447c6558b3c311c6295249fa4eaab447583 100644 (file)
@@ -99,7 +99,7 @@ static struct {
        {0x0000, 0},
 };
 
-static void mixcomwd_timerfun(unsigned long d);
+static void mixcomwd_timerfun(struct timer_list *unused);
 
 static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */
 
@@ -120,7 +120,7 @@ static void mixcomwd_ping(void)
        return;
 }
 
-static void mixcomwd_timerfun(unsigned long d)
+static void mixcomwd_timerfun(struct timer_list *unused)
 {
        mixcomwd_ping();
        mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
index 366e5c7e650bfd4b3da3fddcebd9bc9ab86a23ac..6610e9217dbc237a7e5352df3cb9587a6770be67 100644 (file)
@@ -80,9 +80,9 @@ static void mpc8xxx_wdt_keepalive(struct mpc8xxx_wdt_ddata *ddata)
        spin_unlock(&ddata->lock);
 }
 
-static void mpc8xxx_wdt_timer_ping(unsigned long arg)
+static void mpc8xxx_wdt_timer_ping(struct timer_list *t)
 {
-       struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
+       struct mpc8xxx_wdt_ddata *ddata = from_timer(ddata, t, timer);
 
        mpc8xxx_wdt_keepalive(ddata);
        /* We're pinging it twice faster than needed, just to be sure. */
@@ -173,8 +173,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
        }
 
        spin_lock_init(&ddata->lock);
-       setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
-                   (unsigned long)ddata);
+       timer_setup(&ddata->timer, mpc8xxx_wdt_timer_ping, 0);
 
        ddata->wdd.info = &mpc8xxx_wdt_info,
        ddata->wdd.ops = &mpc8xxx_wdt_ops,
index ff27c4ac96e442dadec4129ecb98931ddc2ca150..ca360d204548fbd1f13a8750a9bbe6d091f9eaf4 100644 (file)
@@ -68,7 +68,7 @@ static struct {
        unsigned int gstate;
 } mtx1_wdt_device;
 
-static void mtx1_wdt_trigger(unsigned long unused)
+static void mtx1_wdt_trigger(struct timer_list *unused)
 {
        spin_lock(&mtx1_wdt_device.lock);
        if (mtx1_wdt_device.running)
@@ -219,7 +219,7 @@ static int mtx1_wdt_probe(struct platform_device *pdev)
        init_completion(&mtx1_wdt_device.stop);
        mtx1_wdt_device.queue = 0;
        clear_bit(0, &mtx1_wdt_device.inuse);
-       setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
+       timer_setup(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0);
        mtx1_wdt_device.default_ticks = ticks;
 
        ret = misc_register(&mtx1_wdt_misc);
index d5bed78c4d9fbbec03ec2268e391676b1996d8ca..830bd04ff911f4f6ebd00bdadd9ff727405a5eeb 100644 (file)
@@ -216,7 +216,7 @@ static ssize_t nuc900_wdt_write(struct file *file, const char __user *data,
        return len;
 }
 
-static void nuc900_wdt_timer_ping(unsigned long data)
+static void nuc900_wdt_timer_ping(struct timer_list *unused)
 {
        if (time_before(jiffies, nuc900_wdt->next_heartbeat)) {
                nuc900_wdt_keepalive();
@@ -267,7 +267,7 @@ static int nuc900wdt_probe(struct platform_device *pdev)
 
        clk_enable(nuc900_wdt->wdt_clock);
 
-       setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
+       timer_setup(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
 
        ret = misc_register(&nuc900wdt_miscdev);
        if (ret) {
index 3ad5206d79357e8862078e16426895811aa3d87c..b72ce68eacd3daa577c4a5fe2156e19e52545290 100644 (file)
@@ -367,7 +367,7 @@ static void pcwd_show_card_info(void)
                pr_info("No previous trip detected - Cold boot or reset\n");
 }
 
-static void pcwd_timer_ping(unsigned long data)
+static void pcwd_timer_ping(struct timer_list *unused)
 {
        int wdrst_stat;
 
@@ -893,7 +893,7 @@ static int pcwd_isa_probe(struct device *dev, unsigned int id)
        /* clear the "card caused reboot" flag */
        pcwd_clear_status();
 
-       setup_timer(&pcwd_private.timer, pcwd_timer_ping, 0);
+       timer_setup(&pcwd_private.timer, pcwd_timer_ping, 0);
 
        /*  Disable the board  */
        pcwd_stop();
index e35cf5e87907c3f98520ab0440d6088e6976f803..e0a6f8c0f03cde84a32e5744a1f3ee24fd7cbd00 100644 (file)
@@ -85,7 +85,7 @@ static inline void pikawdt_reset(void)
 /*
  * Timer tick
  */
-static void pikawdt_ping(unsigned long data)
+static void pikawdt_ping(struct timer_list *unused)
 {
        if (time_before(jiffies, pikawdt_private.next_heartbeat) ||
                        (!nowayout && !pikawdt_private.open)) {
@@ -269,7 +269,7 @@ static int __init pikawdt_init(void)
 
        iounmap(fpga);
 
-       setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);
+       timer_setup(&pikawdt_private.timer, pikawdt_ping, 0);
 
        ret = misc_register(&pikawdt_miscdev);
        if (ret) {
index 47a8f1b1087d4f5a310ee9f01b67bce406251666..a281aa84bfb1402ff2f7567289902a2589ad223f 100644 (file)
@@ -67,7 +67,7 @@ static struct {
 
 /* generic helper functions */
 
-static void rdc321x_wdt_trigger(unsigned long unused)
+static void rdc321x_wdt_trigger(struct timer_list *unused)
 {
        unsigned long flags;
        u32 val;
@@ -262,7 +262,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
 
        clear_bit(0, &rdc321x_wdt_device.inuse);
 
-       setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+       timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
 
        rdc321x_wdt_device.default_ticks = ticks;
 
index 8d589939bc8447b7cb116ba8b3ff70bdb8f207b6..87333a41f75384ca7bd4ff3b5c66bad5602e5dd8 100644 (file)
@@ -112,7 +112,7 @@ MODULE_PARM_DESC(nowayout,
        "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
 static DEFINE_TIMER(timer, wdt_timer_ping);
 static unsigned long next_heartbeat;
 static unsigned long wdt_is_open;
@@ -122,7 +122,7 @@ static char wdt_expect_close;
  *     Whack the dog
  */
 
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
 {
        /* If we got a heartbeat pulse within the WDT_US_INTERVAL
         * we agree to ping the WDT
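
Several of the watchdog drivers above keep a file-scope timer declared with DEFINE_TIMER; for those, the conversion only changes the callback prototype from an unsigned long argument to a struct timer_list pointer. A minimal sketch of that self-rearming form with illustrative names:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void demo_ping(struct timer_list *unused);
static DEFINE_TIMER(demo_timer, demo_ping);

static void demo_ping(struct timer_list *unused)
{
	/* pat the hardware here, then re-arm for the next interval */
	mod_timer(&demo_timer, jiffies + HZ);
}
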
index 3e9bbaa37bf46ac872b51da65fc4ed31ee1b9f2e..6aadb56e7faaa7161b8f32d8fd45a573e989e4a2 100644 (file)
@@ -123,7 +123,7 @@ MODULE_PARM_DESC(nowayout,
 
 static __u16 __iomem *wdtmrctl;
 
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
 static DEFINE_TIMER(timer, wdt_timer_ping);
 static unsigned long next_heartbeat;
 static unsigned long wdt_is_open;
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
  *     Whack the dog
  */
 
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
 {
        /* If we got a heartbeat pulse within the WDT_US_INTERVAL
         * we agree to ping the WDT
index 517a733175ef84c8f8a7be3f79d8b97a3b6eed9e..a7d6425db807ff9d95c91aa24be73bbdfa15a9bd 100644 (file)
@@ -175,9 +175,9 @@ static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
        return 0;
 }
 
-static void sh_wdt_ping(unsigned long data)
+static void sh_wdt_ping(struct timer_list *t)
 {
-       struct sh_wdt *wdt = (struct sh_wdt *)data;
+       struct sh_wdt *wdt = from_timer(wdt, t, timer);
        unsigned long flags;
 
        spin_lock_irqsave(&wdt->lock, flags);
@@ -275,7 +275,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
                return rc;
        }
 
-       setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
+       timer_setup(&wdt->timer, sh_wdt_ping, 0);
        wdt->timer.expires      = next_ping_period(clock_division_ratio);
 
        dev_info(&pdev->dev, "initialized.\n");
index ad3c3be13b40981d5a4ddecbccfbbfb262071e71..b085ef1084ec4f99d9d4fd2364fe4f35c0b05230 100644 (file)
@@ -67,7 +67,7 @@ static struct watchdog_device wdt_dev;
 static struct resource wdt_res;
 static void __iomem *wdt_mem;
 static unsigned int mmio;
-static void wdt_timer_tick(unsigned long data);
+static void wdt_timer_tick(struct timer_list *unused);
 static DEFINE_TIMER(timer, wdt_timer_tick);
                                        /* The timer that pings the watchdog */
 static unsigned long next_heartbeat;   /* the next_heartbeat for the timer */
@@ -88,7 +88,7 @@ static inline void wdt_reset(void)
  *     then the external/userspace heartbeat).
  *  2) the watchdog timer has been stopped by userspace.
  */
-static void wdt_timer_tick(unsigned long data)
+static void wdt_timer_tick(struct timer_list *unused)
 {
        if (time_before(jiffies, next_heartbeat) ||
           (!watchdog_active(&wdt_dev))) {
index ba6b680af1000ebe795c0522691675494b28795a..05658ecc0aa4cdc87875ec05aba6eed23b4f1551 100644 (file)
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(nowayout,
                "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
 static DEFINE_TIMER(timer, wdt_timer_ping);
 static unsigned long next_heartbeat;
 static unsigned long wdt_is_open;
@@ -108,7 +108,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
  *     Whack the dog
  */
 
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
 {
        /* If we got a heartbeat pulse within the WDT_US_INTERVAL
         * we agree to ping the WDT
index 74265b2f806ca24d4eda05ef860e8092ef1afe1d..8a8d952f8df96c15a1b7a4462f50edd233fb38f3 100644 (file)
@@ -137,25 +137,6 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
 }
 EXPORT_SYMBOL_GPL(watchdog_init_timeout);
 
-static int watchdog_reboot_notifier(struct notifier_block *nb,
-                                   unsigned long code, void *data)
-{
-       struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
-                                                  reboot_nb);
-
-       if (code == SYS_DOWN || code == SYS_HALT) {
-               if (watchdog_active(wdd)) {
-                       int ret;
-
-                       ret = wdd->ops->stop(wdd);
-                       if (ret)
-                               return NOTIFY_BAD;
-               }
-       }
-
-       return NOTIFY_DONE;
-}
-
 static int watchdog_restart_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data)
 {
@@ -244,19 +225,6 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
                }
        }
 
-       if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
-               wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
-
-               ret = register_reboot_notifier(&wdd->reboot_nb);
-               if (ret) {
-                       pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
-                              wdd->id, ret);
-                       watchdog_dev_unregister(wdd);
-                       ida_simple_remove(&watchdog_ida, wdd->id);
-                       return ret;
-               }
-       }
-
        if (wdd->ops->restart) {
                wdd->restart_nb.notifier_call = watchdog_restart_notifier;
 
@@ -302,9 +270,6 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd)
        if (wdd->ops->restart)
                unregister_restart_handler(&wdd->restart_nb);
 
-       if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
-               unregister_reboot_notifier(&wdd->reboot_nb);
-
        watchdog_dev_unregister(wdd);
        ida_simple_remove(&watchdog_ida, wdd->id);
 }
index 0826e663bd5a3ce1c49b0beef8c6ed9f97da4fb0..1e971a50d7fb74ffc1d7af27e39ef4a3df4d4cc9 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/miscdevice.h>  /* For handling misc devices */
 #include <linux/module.h>      /* For module stuff/... */
 #include <linux/mutex.h>       /* For mutexes */
+#include <linux/reboot.h>      /* For reboot notifier */
 #include <linux/slab.h>                /* For memory functions */
 #include <linux/types.h>       /* For standard types (like size_t) */
 #include <linux/watchdog.h>    /* For watchdog specific items */
@@ -1016,6 +1017,25 @@ static struct class watchdog_class = {
        .dev_groups =   wdt_groups,
 };
 
+static int watchdog_reboot_notifier(struct notifier_block *nb,
+                                   unsigned long code, void *data)
+{
+       struct watchdog_device *wdd;
+
+       wdd = container_of(nb, struct watchdog_device, reboot_nb);
+       if (code == SYS_DOWN || code == SYS_HALT) {
+               if (watchdog_active(wdd)) {
+                       int ret;
+
+                       ret = wdd->ops->stop(wdd);
+                       if (ret)
+                               return NOTIFY_BAD;
+               }
+       }
+
+       return NOTIFY_DONE;
+}
+
 /*
  *     watchdog_dev_register: register a watchdog device
  *     @wdd: watchdog device
@@ -1049,6 +1069,18 @@ int watchdog_dev_register(struct watchdog_device *wdd)
        if (ret) {
                device_destroy(&watchdog_class, devno);
                watchdog_cdev_unregister(wdd);
+               return ret;
+       }
+
+       if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
+               wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
+
+               ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
+               if (ret) {
+                       pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
+                              wdd->id, ret);
+                       watchdog_dev_unregister(wdd);
+               }
        }
 
        return ret;
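The two watchdog hunks above move the WDOG_STOP_ON_REBOOT handling from the core registration path into watchdog_dev_register() and switch it to the device-managed helper, so the notifier is torn down automatically with the device. A sketch of the devm pattern with invented driver-side names:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_reboot_notify(struct notifier_block *nb,
			    unsigned long code, void *data)
{
	if (code == SYS_DOWN || code == SYS_HALT) {
		/* stop the hardware here */
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_reboot_nb = {
	.notifier_call = my_reboot_notify,
};

static int my_probe(struct device *dev)
{
	/* unregistered automatically when 'dev' is torn down */
	return devm_register_reboot_notifier(dev, &my_reboot_nb);
}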
index 139e018a82b06079231fae25abc9d6dcc58da300..f45114fd8e1e76cf1359e3446acd5a9426791db1 100644 (file)
@@ -358,10 +358,10 @@ struct deferred_entry {
        struct page *page;
 };
 static LIST_HEAD(deferred_list);
-static void gnttab_handle_deferred(unsigned long);
+static void gnttab_handle_deferred(struct timer_list *);
 static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
 
-static void gnttab_handle_deferred(unsigned long unused)
+static void gnttab_handle_deferred(struct timer_list *unused)
 {
        unsigned int nr = 10;
        struct deferred_entry *first = NULL;
index 168094a3fae7ebb109c1ead9fe3b8c3e38201444..29641383e136024ec9f2993b1a29a7fc9c2fa94b 100644 (file)
@@ -59,6 +59,3 @@ endif
 
 targets := $(patsubst $(obj)/%,%, \
                                 $(shell find $(obj) -name \*.gen.S 2>/dev/null))
-# Without this, built-in.o won't be created when it's empty, and the
-# final vmlinux link will fail.
-obj- := dummy
index 2a5de610dd8fd5f0e905046f7752127543d687f2..bdabb2765d1b3e324d77a9513ee170da18ec6d5e 100644 (file)
@@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
 
        if (v9inode->qid.type != st->qid.type)
                return 0;
+
+       if (v9inode->qid.path != st->qid.path)
+               return 0;
        return 1;
 }
 
index 70f9887c59a90f77c28d1b1b484dc70eff43be94..7f6ae21a27b3ca451d353398b4a0e25cb55a9177 100644 (file)
@@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
 
        if (v9inode->qid.type != st->qid.type)
                return 0;
+
+       if (v9inode->qid.path != st->qid.path)
+               return 0;
        return 1;
 }
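Both 9p test callbacks above now compare qid.path as well as qid.type. These are the match callbacks handed to iget5_locked(), so two server objects whose qids differ only in path no longer alias to one in-core inode. A sketch of how such a callback is consumed; the call is abbreviated, and v9fs_set_inode plus the hash value are assumptions here rather than quotes from the hunk:

#include <linux/fs.h>

static struct inode *lookup_by_qid(struct super_block *sb,
				   unsigned long i_ino, void *st)
{
	/* iget5_locked() reuses an inode only if the test callback returns 1 */
	return iget5_locked(sb, i_ino, v9fs_test_inode, v9fs_set_inode, st);
}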
 
index 8b75463cb2116895b4a44b28bf1faab7f1db8f71..af03c2a901eb4fe0518d0fbbd60359f48357ab64 100644 (file)
@@ -94,13 +94,13 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
        if (v9ses->cache)
                sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
 
-       sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
+       sb->s_flags |= SB_ACTIVE | SB_DIRSYNC | SB_NOATIME;
        if (!v9ses->cache)
-               sb->s_flags |= MS_SYNCHRONOUS;
+               sb->s_flags |= SB_SYNCHRONOUS;
 
 #ifdef CONFIG_9P_FS_POSIX_ACL
        if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
 #endif
 
        return 0;
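The MS_* to SB_* changes here and in the filesystems below are a flag-namespace split: SB_* are the in-kernel super_block s_flags bits, while the MS_* names stay reserved for the mount(2) userspace ABI. The bit values match, so this sketch shows the convention rather than a behavioural change:

#include <linux/fs.h>

static void example_mark_readonly(struct super_block *sb)
{
	/* was: sb->s_flags |= MS_RDONLY; */
	sb->s_flags |= SB_RDONLY;
}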
index c9fdfb11293357f1d79c0d729bc4d5db1118509d..cfda2c7caedcec8b53d738f7c93924a723515370 100644 (file)
@@ -213,7 +213,7 @@ static int parse_options(struct super_block *sb, char *options)
 static int adfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return parse_options(sb, data);
 }
 
@@ -372,7 +372,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *root;
        int ret = -EINVAL;
 
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
 
        asb = kzalloc(sizeof(*asb), GFP_KERNEL);
        if (!asb)
index 185d5ab7e986af489612d795185d91e86a22ace3..0f0e6925e97dd123cc03f991f33f998599e57e5c 100644 (file)
@@ -453,7 +453,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
        pr_crit("error (device %s): %s(): %pV\n", sb->s_id, function, &vaf);
        if (!sb_rdonly(sb))
                pr_warn("Remounting filesystem read-only\n");
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        va_end(args);
 }
 
index 2b1399611d9e6595151a578724c4bc6d17996ff2..5ba9ef2742f6ee5e903b3df04c5f627fc4fc00ae 100644 (file)
@@ -250,12 +250,12 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
        int i, res = 0;
        struct affs_sb_info *sbi = AFFS_SB(sb);
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                return 0;
 
        if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
                pr_notice("Bitmap invalid - mounting %s read only\n", sb->s_id);
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
                return 0;
        }
 
@@ -288,7 +288,7 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
                if (affs_checksum_block(sb, bh)) {
                        pr_warn("Bitmap %u invalid - mounting %s read only.\n",
                                bm->bm_key, sb->s_id);
-                       *flags |= MS_RDONLY;
+                       *flags |= SB_RDONLY;
                        goto out;
                }
                pr_debug("read bitmap block %d: %d\n", blk, bm->bm_key);
index 884bedab7266a528b60884c7ab3d91e9a703724d..1117e36134cc82e5127de496240f6f99b541b0a4 100644 (file)
@@ -356,7 +356,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic             = AFFS_SUPER_MAGIC;
        sb->s_op                = &affs_sops;
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
 
        sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
        if (!sbi)
@@ -466,7 +466,7 @@ got_root:
        if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
             || chksum == MUFS_DCOFS) && !sb_rdonly(sb)) {
                pr_notice("Dircache FS - mounting %s read only\n", sb->s_id);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        switch (chksum) {
        case MUFS_FS:
@@ -488,7 +488,7 @@ got_root:
                /* fall thru */
        case FS_OFS:
                affs_set_opt(sbi->s_flags, SF_OFS);
-               sb->s_flags |= MS_NOEXEC;
+               sb->s_flags |= SB_NOEXEC;
                break;
        case MUFS_DCOFS:
        case MUFS_INTLOFS:
@@ -497,7 +497,7 @@ got_root:
        case FS_INTLOFS:
                affs_set_opt(sbi->s_flags, SF_INTL);
                affs_set_opt(sbi->s_flags, SF_OFS);
-               sb->s_flags |= MS_NOEXEC;
+               sb->s_flags |= SB_NOEXEC;
                break;
        default:
                pr_err("Unknown filesystem on device %s: %08X\n",
@@ -513,7 +513,7 @@ got_root:
                        sig, sig[3] + '0', blocksize);
        }
 
-       sb->s_flags |= MS_NODEV | MS_NOSUID;
+       sb->s_flags |= SB_NODEV | SB_NOSUID;
 
        sbi->s_data_blksize = sb->s_blocksize;
        if (affs_test_opt(sbi->s_flags, SF_OFS))
@@ -570,7 +570,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
        pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
 
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
 
        memcpy(volume, sbi->s_volume, 32);
        if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
@@ -596,10 +596,10 @@ affs_remount(struct super_block *sb, int *flags, char *data)
        memcpy(sbi->s_volume, volume, 32);
        spin_unlock(&sbi->symlink_lock);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                affs_free_bitmap(sb);
        else
                res = affs_init_bitmap(sb, flags);
index 1858c91169e4fc213e77b548628386041790b0c0..9bb921d120d0f8b42bd1e68b508af1f285bbcdbc 100644 (file)
@@ -207,13 +207,8 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
                rcu_read_lock();
                cell = afs_lookup_cell_rcu(net, name, namesz);
                rcu_read_unlock();
-               if (!IS_ERR(cell)) {
-                       if (excl) {
-                               afs_put_cell(net, cell);
-                               return ERR_PTR(-EEXIST);
-                       }
+               if (!IS_ERR(cell))
                        goto wait_for_cell;
-               }
        }
 
        /* Assume we're probably going to create a cell and preallocate and
index ab618d32554c648848b6001a3964b279deb794dc..ff8d5bf4354f306227a297ca542078580ca57e34 100644 (file)
@@ -765,6 +765,8 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
        if (fc->ac.error < 0)
                return;
 
+       d_drop(new_dentry);
+
        inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
                         newfid, newstatus, newcb, fc->cbi);
        if (IS_ERR(inode)) {
@@ -775,9 +777,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
                return;
        }
 
-       d_instantiate(new_dentry, inode);
-       if (d_unhashed(new_dentry))
-               d_rehash(new_dentry);
+       d_add(new_dentry, inode);
 }
 
 /*
@@ -818,6 +818,8 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
                        goto error_key;
+       } else {
+               goto error_key;
        }
 
        key_put(key);
@@ -972,7 +974,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        struct afs_fs_cursor fc;
        struct afs_file_status newstatus;
        struct afs_callback newcb;
-       struct afs_vnode *dvnode = dvnode = AFS_FS_I(dir);
+       struct afs_vnode *dvnode = AFS_FS_I(dir);
        struct afs_fid newfid;
        struct key *key;
        int ret;
@@ -1006,6 +1008,8 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
                        goto error_key;
+       } else {
+               goto error_key;
        }
 
        key_put(key);
@@ -1053,7 +1057,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
                        afs_end_vnode_operation(&fc);
-                       return -ERESTARTSYS;
+                       goto error_key;
                }
 
                while (afs_select_fileserver(&fc)) {
@@ -1071,6 +1075,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
                        goto error_key;
+       } else {
+               goto error_key;
        }
 
        key_put(key);
@@ -1130,6 +1136,8 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
                        goto error_key;
+       } else {
+               goto error_key;
        }
 
        key_put(key);
@@ -1180,7 +1188,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (orig_dvnode != new_dvnode) {
                        if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
                                afs_end_vnode_operation(&fc);
-                               return -ERESTARTSYS;
+                               goto error_key;
                        }
                }
                while (afs_select_fileserver(&fc)) {
@@ -1199,14 +1207,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto error_key;
        }
 
-       key_put(key);
-       _leave(" = 0");
-       return 0;
-
 error_key:
        key_put(key);
 error:
-       d_drop(new_dentry);
        _leave(" = %d", ret);
        return ret;
 }
index 7571a5dfd5a35cbd674ddf24671029a8b6b522d1..c40ba2fe3cbeee50d1b529d5e1e9d2960633f995 100644 (file)
@@ -170,7 +170,7 @@ void afs_lock_work(struct work_struct *work)
 {
        struct afs_vnode *vnode =
                container_of(work, struct afs_vnode, lock_work.work);
-       struct file_lock *fl;
+       struct file_lock *fl, *next;
        afs_lock_type_t type;
        struct key *key;
        int ret;
@@ -179,117 +179,136 @@ void afs_lock_work(struct work_struct *work)
 
        spin_lock(&vnode->lock);
 
-       if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
+again:
+       _debug("wstate %u for %p", vnode->lock_state, vnode);
+       switch (vnode->lock_state) {
+       case AFS_VNODE_LOCK_NEED_UNLOCK:
                _debug("unlock");
+               vnode->lock_state = AFS_VNODE_LOCK_UNLOCKING;
                spin_unlock(&vnode->lock);
 
                /* attempt to release the server lock; if it fails, we just
-                * wait 5 minutes and it'll time out anyway */
-               ret = afs_release_lock(vnode, vnode->unlock_key);
+                * wait 5 minutes and it'll expire anyway */
+               ret = afs_release_lock(vnode, vnode->lock_key);
                if (ret < 0)
                        printk(KERN_WARNING "AFS:"
                               " Failed to release lock on {%x:%x} error %d\n",
                               vnode->fid.vid, vnode->fid.vnode, ret);
 
                spin_lock(&vnode->lock);
-               key_put(vnode->unlock_key);
-               vnode->unlock_key = NULL;
-               clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
-       }
+               key_put(vnode->lock_key);
+               vnode->lock_key = NULL;
+               vnode->lock_state = AFS_VNODE_LOCK_NONE;
+
+               if (list_empty(&vnode->pending_locks)) {
+                       spin_unlock(&vnode->lock);
+                       return;
+               }
+
+               /* The new front of the queue now owns the state variables. */
+               next = list_entry(vnode->pending_locks.next,
+                                 struct file_lock, fl_u.afs.link);
+               vnode->lock_key = afs_file_key(next->fl_file);
+               vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+               vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+               goto again;
 
-       /* if we've got a lock, then it must be time to extend that lock as AFS
-        * locks time out after 5 minutes */
-       if (!list_empty(&vnode->granted_locks)) {
+       /* If we've already got a lock, then it must be time to extend that
+        * lock as AFS locks time out after 5 minutes.
+        */
+       case AFS_VNODE_LOCK_GRANTED:
                _debug("extend");
 
-               if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
-                       BUG();
-               fl = list_entry(vnode->granted_locks.next,
-                               struct file_lock, fl_u.afs.link);
-               key = key_get(afs_file_key(fl->fl_file));
+               ASSERT(!list_empty(&vnode->granted_locks));
+
+               key = key_get(vnode->lock_key);
+               vnode->lock_state = AFS_VNODE_LOCK_EXTENDING;
                spin_unlock(&vnode->lock);
 
-               ret = afs_extend_lock(vnode, key);
-               clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+               ret = afs_extend_lock(vnode, key); /* RPC */
                key_put(key);
-               switch (ret) {
-               case 0:
+
+               if (ret < 0)
+                       pr_warning("AFS: Failed to extend lock on {%x:%x} error %d\n",
+                                  vnode->fid.vid, vnode->fid.vnode, ret);
+
+               spin_lock(&vnode->lock);
+
+               if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
+                       goto again;
+               vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+
+               if (ret == 0)
                        afs_schedule_lock_extension(vnode);
-                       break;
-               default:
-                       /* ummm... we failed to extend the lock - retry
-                        * extension shortly */
-                       printk(KERN_WARNING "AFS:"
-                              " Failed to extend lock on {%x:%x} error %d\n",
-                              vnode->fid.vid, vnode->fid.vnode, ret);
+               else
                        queue_delayed_work(afs_lock_manager, &vnode->lock_work,
                                           HZ * 10);
-                       break;
-               }
-               _leave(" [extend]");
+               spin_unlock(&vnode->lock);
+               _leave(" [ext]");
                return;
-       }
 
-       /* if we don't have a granted lock, then we must've been called back by
-        * the server, and so if might be possible to get a lock we're
-        * currently waiting for */
-       if (!list_empty(&vnode->pending_locks)) {
+               /* If we don't have a granted lock, then we must've been called
+                * back by the server, and so if might be possible to get a
+                * lock we're currently waiting for.
+                */
+       case AFS_VNODE_LOCK_WAITING_FOR_CB:
                _debug("get");
 
-               if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
-                       BUG();
-               fl = list_entry(vnode->pending_locks.next,
-                               struct file_lock, fl_u.afs.link);
-               key = key_get(afs_file_key(fl->fl_file));
-               type = (fl->fl_type == F_RDLCK) ?
-                       AFS_LOCK_READ : AFS_LOCK_WRITE;
+               key = key_get(vnode->lock_key);
+               type = vnode->lock_type;
+               vnode->lock_state = AFS_VNODE_LOCK_SETTING;
                spin_unlock(&vnode->lock);
 
-               ret = afs_set_lock(vnode, key, type);
-               clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+               ret = afs_set_lock(vnode, key, type); /* RPC */
+               key_put(key);
+
+               spin_lock(&vnode->lock);
                switch (ret) {
                case -EWOULDBLOCK:
                        _debug("blocked");
                        break;
                case 0:
                        _debug("acquired");
-                       if (type == AFS_LOCK_READ)
-                               set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
-                       else
-                               set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
-                       ret = AFS_LOCK_GRANTED;
+                       vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+                       /* Fall through */
                default:
-                       spin_lock(&vnode->lock);
-                       /* the pending lock may have been withdrawn due to a
-                        * signal */
-                       if (list_entry(vnode->pending_locks.next,
-                                      struct file_lock, fl_u.afs.link) == fl) {
-                               fl->fl_u.afs.state = ret;
-                               if (ret == AFS_LOCK_GRANTED)
-                                       afs_grant_locks(vnode, fl);
-                               else
-                                       list_del_init(&fl->fl_u.afs.link);
-                               wake_up(&fl->fl_wait);
-                               spin_unlock(&vnode->lock);
-                       } else {
+                       /* Pass the lock or the error onto the first locker in
+                        * the list - if they're looking for this type of lock.
+                        * If they're not, we assume that whoever asked for it
+                        * took a signal.
+                        */
+                       if (list_empty(&vnode->pending_locks)) {
                                _debug("withdrawn");
-                               clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
-                               clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
-                               spin_unlock(&vnode->lock);
-                               afs_release_lock(vnode, key);
-                               if (!list_empty(&vnode->pending_locks))
-                                       afs_lock_may_be_available(vnode);
+                               vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+                               goto again;
                        }
-                       break;
+
+                       fl = list_entry(vnode->pending_locks.next,
+                                       struct file_lock, fl_u.afs.link);
+                       type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+                       if (vnode->lock_type != type) {
+                               _debug("changed");
+                               vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+                               goto again;
+                       }
+
+                       fl->fl_u.afs.state = ret;
+                       if (ret == 0)
+                               afs_grant_locks(vnode, fl);
+                       else
+                               list_del_init(&fl->fl_u.afs.link);
+                       wake_up(&fl->fl_wait);
+                       spin_unlock(&vnode->lock);
+                       _leave(" [granted]");
+                       return;
                }
-               key_put(key);
-               _leave(" [pend]");
+
+       default:
+               /* Looks like a lock request was withdrawn. */
+               spin_unlock(&vnode->lock);
+               _leave(" [no]");
                return;
        }
-
-       /* looks like the lock request was withdrawn on a signal */
-       spin_unlock(&vnode->lock);
-       _leave(" [no locks]");
 }
 
 /*
@@ -298,15 +317,105 @@ void afs_lock_work(struct work_struct *work)
  * AF_RXRPC
  * - the caller must hold the vnode lock
  */
-static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
+static void afs_defer_unlock(struct afs_vnode *vnode)
 {
-       cancel_delayed_work(&vnode->lock_work);
-       if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
-           !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
-               BUG();
-       if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
-               BUG();
-       vnode->unlock_key = key_get(key);
+       _enter("");
+
+       if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
+           vnode->lock_state == AFS_VNODE_LOCK_EXTENDING) {
+               cancel_delayed_work(&vnode->lock_work);
+
+               vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+               afs_lock_may_be_available(vnode);
+       }
+}
+
+/*
+ * Check that our view of the file metadata is up to date and check to see
+ * whether we think that we have a locking permit.
+ */
+static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
+                             afs_lock_type_t type, bool can_sleep)
+{
+       afs_access_t access;
+       int ret;
+
+       /* Make sure we've got a callback on this file and that our view of the
+        * data version is up to date.
+        */
+       ret = afs_validate(vnode, key);
+       if (ret < 0)
+               return ret;
+
+       /* Check the permission set to see if we're actually going to be
+        * allowed to get a lock on this file.
+        */
+       ret = afs_check_permit(vnode, key, &access);
+       if (ret < 0)
+               return ret;
+
+       /* At a rough estimation, you need LOCK, WRITE or INSERT perm to
+        * read-lock a file and WRITE or INSERT perm to write-lock a file.
+        *
+        * We can't rely on the server to do this for us since if we want to
+        * share a read lock that we already have, we won't go the server.
+        */
+       if (type == AFS_LOCK_READ) {
+               if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
+                       return -EACCES;
+               if (vnode->status.lock_count == -1 && !can_sleep)
+                       return -EAGAIN; /* Write locked */
+       } else {
+               if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
+                       return -EACCES;
+               if (vnode->status.lock_count != 0 && !can_sleep)
+                       return -EAGAIN; /* Locked */
+       }
+
+       return 0;
+}
+
+/*
+ * Remove the front runner from the pending queue.
+ * - The caller must hold vnode->lock.
+ */
+static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
+{
+       struct file_lock *next;
+
+       _enter("");
+
+       /* ->lock_type, ->lock_key and ->lock_state only belong to this
+        * file_lock if we're at the front of the pending queue or if we have
+        * the lock granted or if the lock_state is NEED_UNLOCK or UNLOCKING.
+        */
+       if (vnode->granted_locks.next == &fl->fl_u.afs.link &&
+           vnode->granted_locks.prev == &fl->fl_u.afs.link) {
+               list_del_init(&fl->fl_u.afs.link);
+               afs_defer_unlock(vnode);
+               return;
+       }
+
+       if (!list_empty(&vnode->granted_locks) ||
+           vnode->pending_locks.next != &fl->fl_u.afs.link) {
+               list_del_init(&fl->fl_u.afs.link);
+               return;
+       }
+
+       list_del_init(&fl->fl_u.afs.link);
+       key_put(vnode->lock_key);
+       vnode->lock_key = NULL;
+       vnode->lock_state = AFS_VNODE_LOCK_NONE;
+
+       if (list_empty(&vnode->pending_locks))
+               return;
+
+       /* The new front of the queue now owns the state variables. */
+       next = list_entry(vnode->pending_locks.next,
+                         struct file_lock, fl_u.afs.link);
+       vnode->lock_key = afs_file_key(next->fl_file);
+       vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+       vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
        afs_lock_may_be_available(vnode);
 }
 
@@ -315,7 +424,7 @@ static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
  */
 static int afs_do_setlk(struct file *file, struct file_lock *fl)
 {
-       struct inode *inode = file_inode(file);
+       struct inode *inode = locks_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        afs_lock_type_t type;
        struct key *key = afs_file_key(file);
@@ -333,165 +442,136 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
 
        type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
 
-       spin_lock(&inode->i_lock);
-
-       /* make sure we've got a callback on this file and that our view of the
-        * data version is up to date */
-       ret = afs_validate(vnode, key);
+       ret = afs_do_setlk_check(vnode, key, type, fl->fl_flags & FL_SLEEP);
        if (ret < 0)
-               goto error;
-
-       if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
-               ret = -EAGAIN;
-               goto error;
-       }
+               return ret;
 
        spin_lock(&vnode->lock);
 
-       /* if we've already got a readlock on the server then we can instantly
+       /* If we've already got a readlock on the server then we instantly
         * grant another readlock, irrespective of whether there are any
-        * pending writelocks */
+        * pending writelocks.
+        */
        if (type == AFS_LOCK_READ &&
-           vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
+           vnode->lock_state == AFS_VNODE_LOCK_GRANTED &&
+           vnode->lock_type == AFS_LOCK_READ) {
                _debug("instant readlock");
-               ASSERTCMP(vnode->flags &
-                         ((1 << AFS_VNODE_LOCKING) |
-                          (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
                ASSERT(!list_empty(&vnode->granted_locks));
-               goto sharing_existing_lock;
+               goto share_existing_lock;
        }
 
-       /* if there's no-one else with a lock on this vnode, then we need to
-        * ask the server for a lock */
-       if (list_empty(&vnode->pending_locks) &&
-           list_empty(&vnode->granted_locks)) {
-               _debug("not locked");
-               ASSERTCMP(vnode->flags &
-                         ((1 << AFS_VNODE_LOCKING) |
-                          (1 << AFS_VNODE_READLOCKED) |
-                          (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
-               list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
-               set_bit(AFS_VNODE_LOCKING, &vnode->flags);
-               spin_unlock(&vnode->lock);
+       list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
 
-               ret = afs_set_lock(vnode, key, type);
-               clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
-               switch (ret) {
-               case 0:
-                       _debug("acquired");
-                       goto acquired_server_lock;
-               case -EWOULDBLOCK:
-                       _debug("would block");
-                       spin_lock(&vnode->lock);
-                       ASSERT(list_empty(&vnode->granted_locks));
-                       ASSERTCMP(vnode->pending_locks.next, ==,
-                                 &fl->fl_u.afs.link);
-                       goto wait;
-               default:
-                       spin_lock(&vnode->lock);
-                       list_del_init(&fl->fl_u.afs.link);
-                       spin_unlock(&vnode->lock);
-                       goto error;
-               }
-       }
+       if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+               goto need_to_wait;
 
-       /* otherwise, we need to wait for a local lock to become available */
-       _debug("wait local");
-       list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
-wait:
-       if (!(fl->fl_flags & FL_SLEEP)) {
-               _debug("noblock");
-               ret = -EAGAIN;
-               goto abort_attempt;
-       }
+       /* We don't have a lock on this vnode and we aren't currently waiting
+        * for one either, so ask the server for a lock.
+        *
+        * Note that we need to be careful if we get interrupted by a signal
+        * after dispatching the request as we may still get the lock, even
+        * though we don't wait for the reply (it's not too bad a problem - the
+        * lock will expire in 10 mins anyway).
+        */
+       _debug("not locked");
+       vnode->lock_key = key_get(key);
+       vnode->lock_type = type;
+       vnode->lock_state = AFS_VNODE_LOCK_SETTING;
        spin_unlock(&vnode->lock);
 
-       /* now we need to sleep and wait for the lock manager thread to get the
-        * lock from the server */
-       _debug("sleep");
-       ret = wait_event_interruptible(fl->fl_wait,
-                                      fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
-       if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
-               ret = fl->fl_u.afs.state;
-               if (ret < 0)
-                       goto error;
-               spin_lock(&vnode->lock);
-               goto given_lock;
-       }
-
-       /* we were interrupted, but someone may still be in the throes of
-        * giving us the lock */
-       _debug("intr");
-       ASSERTCMP(ret, ==, -ERESTARTSYS);
+       ret = afs_set_lock(vnode, key, type); /* RPC */
 
        spin_lock(&vnode->lock);
-       if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
-               ret = fl->fl_u.afs.state;
-               if (ret < 0) {
-                       spin_unlock(&vnode->lock);
-                       goto error;
-               }
-               goto given_lock;
-       }
+       switch (ret) {
+       default:
+               goto abort_attempt;
 
-abort_attempt:
-       /* we aren't going to get the lock, either because we're unwilling to
-        * wait, or because some signal happened */
-       _debug("abort");
-       if (list_empty(&vnode->granted_locks) &&
-           vnode->pending_locks.next == &fl->fl_u.afs.link) {
-               if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
-                       /* kick the next pending lock into having a go */
-                       list_del_init(&fl->fl_u.afs.link);
-                       afs_lock_may_be_available(vnode);
-               }
-       } else {
-               list_del_init(&fl->fl_u.afs.link);
+       case -EWOULDBLOCK:
+               /* The server doesn't have a lock-waiting queue, so the client
+                * will have to retry.  The server will break the outstanding
+                * callbacks on a file when a lock is released.
+                */
+               _debug("would block");
+               ASSERT(list_empty(&vnode->granted_locks));
+               ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
+               vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+               goto need_to_wait;
+
+       case 0:
+               _debug("acquired");
+               break;
        }
-       spin_unlock(&vnode->lock);
-       goto error;
 
-acquired_server_lock:
        /* we've acquired a server lock, but it needs to be renewed after 5
         * mins */
-       spin_lock(&vnode->lock);
+       vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
        afs_schedule_lock_extension(vnode);
-       if (type == AFS_LOCK_READ)
-               set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
-       else
-               set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
-sharing_existing_lock:
+
+share_existing_lock:
        /* the lock has been granted as far as we're concerned... */
        fl->fl_u.afs.state = AFS_LOCK_GRANTED;
        list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+
 given_lock:
        /* ... but we do still need to get the VFS's blessing */
-       ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
-       ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
-                               (1 << AFS_VNODE_WRITELOCKED))) != 0);
+       spin_unlock(&vnode->lock);
+
        ret = posix_lock_file(file, fl, NULL);
        if (ret < 0)
                goto vfs_rejected_lock;
-       spin_unlock(&vnode->lock);
 
-       /* again, make sure we've got a callback on this file and, again, make
+       /* Again, make sure we've got a callback on this file and, again, make
         * sure that our view of the data version is up to date (we ignore
-        * errors incurred here and deal with the consequences elsewhere) */
+        * errors incurred here and deal with the consequences elsewhere).
+        */
        afs_validate(vnode, key);
+       _leave(" = 0");
+       return 0;
 
-error:
-       spin_unlock(&inode->i_lock);
+need_to_wait:
+       /* We're going to have to wait.  Either this client doesn't have a lock
+        * on the server yet and we need to wait for a callback to occur, or
+        * the client does have a lock on the server, but it belongs to some
+        * other process(es) and is incompatible with the lock we want.
+        */
+       ret = -EAGAIN;
+       if (fl->fl_flags & FL_SLEEP) {
+               spin_unlock(&vnode->lock);
+
+               _debug("sleep");
+               ret = wait_event_interruptible(fl->fl_wait,
+                                              fl->fl_u.afs.state != AFS_LOCK_PENDING);
+
+               spin_lock(&vnode->lock);
+       }
+
+       if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
+               goto given_lock;
+       if (fl->fl_u.afs.state < 0)
+               ret = fl->fl_u.afs.state;
+
+abort_attempt:
+       /* we aren't going to get the lock, either because we're unwilling to
+        * wait, or because some signal happened */
+       _debug("abort");
+       afs_dequeue_lock(vnode, fl);
+
+error_unlock:
+       spin_unlock(&vnode->lock);
        _leave(" = %d", ret);
        return ret;
 
 vfs_rejected_lock:
-       /* the VFS rejected the lock we just obtained, so we have to discard
-        * what we just got */
+       /* The VFS rejected the lock we just obtained, so we have to discard
+        * what we just got.  We defer this to the lock manager work item to
+        * deal with.
+        */
        _debug("vfs refused %d", ret);
+       spin_lock(&vnode->lock);
        list_del_init(&fl->fl_u.afs.link);
        if (list_empty(&vnode->granted_locks))
-               afs_defer_unlock(vnode, key);
-       goto abort_attempt;
+               afs_defer_unlock(vnode);
+       goto error_unlock;
 }
 
 /*
@@ -499,34 +579,21 @@ vfs_rejected_lock:
  */
 static int afs_do_unlk(struct file *file, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
-       struct key *key = afs_file_key(file);
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
        int ret;
 
        _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
 
+       /* Flush all pending writes before doing anything with locks. */
+       vfs_fsync(file, 0);
+
        /* only whole-file unlocks are supported */
        if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
                return -EINVAL;
 
-       fl->fl_ops = &afs_lock_ops;
-       INIT_LIST_HEAD(&fl->fl_u.afs.link);
-       fl->fl_u.afs.state = AFS_LOCK_PENDING;
-
-       spin_lock(&vnode->lock);
        ret = posix_lock_file(file, fl, NULL);
-       if (ret < 0) {
-               spin_unlock(&vnode->lock);
-               _leave(" = %d [vfs]", ret);
-               return ret;
-       }
-
-       /* discard the server lock only if all granted locks are gone */
-       if (list_empty(&vnode->granted_locks))
-               afs_defer_unlock(vnode, key);
-       spin_unlock(&vnode->lock);
-       _leave(" = 0");
-       return 0;
+       _leave(" = %d [%u]", ret, vnode->lock_state);
+       return ret;
 }
 
 /*
@@ -534,7 +601,7 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl)
  */
 static int afs_do_getlk(struct file *file, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
        struct key *key = afs_file_key(file);
        int ret, lock_count;
 
@@ -542,29 +609,25 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
 
        fl->fl_type = F_UNLCK;
 
-       inode_lock(&vnode->vfs_inode);
-
        /* check local lock records first */
-       ret = 0;
        posix_test_lock(file, fl);
        if (fl->fl_type == F_UNLCK) {
                /* no local locks; consult the server */
                ret = afs_fetch_status(vnode, key);
                if (ret < 0)
                        goto error;
-               lock_count = vnode->status.lock_count;
-               if (lock_count) {
-                       if (lock_count > 0)
-                               fl->fl_type = F_RDLCK;
-                       else
-                               fl->fl_type = F_WRLCK;
-                       fl->fl_start = 0;
-                       fl->fl_end = OFFSET_MAX;
-               }
+
+               lock_count = READ_ONCE(vnode->status.lock_count);
+               if (lock_count > 0)
+                       fl->fl_type = F_RDLCK;
+               else
+                       fl->fl_type = F_WRLCK;
+               fl->fl_start = 0;
+               fl->fl_end = OFFSET_MAX;
        }
 
+       ret = 0;
 error:
-       inode_unlock(&vnode->vfs_inode);
        _leave(" = %d [%hd]", ret, fl->fl_type);
        return ret;
 }
@@ -574,7 +637,7 @@ error:
  */
 int afs_lock(struct file *file, int cmd, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
 
        _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
               vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -597,7 +660,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
  */
 int afs_flock(struct file *file, int cmd, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
 
        _enter("{%x:%u},%d,{t=%x,fl=%x}",
               vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -627,9 +690,13 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl)
  */
 static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
 {
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+
        _enter("");
 
+       spin_lock(&vnode->lock);
        list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
+       spin_unlock(&vnode->lock);
 }
 
 /*
@@ -638,7 +705,12 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
  */
 static void afs_fl_release_private(struct file_lock *fl)
 {
+       struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+
        _enter("");
 
-       list_del_init(&fl->fl_u.afs.link);
+       spin_lock(&vnode->lock);
+       afs_dequeue_lock(vnode, fl);
+       _debug("state %u for %p", vnode->lock_state, vnode);
+       spin_unlock(&vnode->lock);
 }
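Taken together, the flock.c changes above replace the old AFS_VNODE_*LOCKED flag bits with one explicit per-vnode lock state (the enum is added in internal.h below). A rough summary of the transitions, reconstructed from these hunks rather than taken from any in-tree document:

/* NONE              -> SETTING         afs_do_setlk() issues the SetLock RPC
 * SETTING           -> GRANTED         RPC succeeded; extension timer armed
 * SETTING           -> WAITING_FOR_CB  -EWOULDBLOCK; wait for a callback break
 * WAITING_FOR_CB    -> SETTING         lock manager retries afs_set_lock()
 * GRANTED           -> EXTENDING       extend before the 5 minute expiry
 * EXTENDING         -> GRANTED         extension finished (or a retry queued)
 * GRANTED/EXTENDING -> NEED_UNLOCK     last local lock dropped
 * NEED_UNLOCK       -> UNLOCKING -> NONE  afs_release_lock(); if lockers are
 *                     still queued, the vnode goes straight to WAITING_FOR_CB
 *                     for the new front of the queue
 */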
index bd8dcee7e066719439d5decb0d970e6142d167eb..e03910cebdd490321d1a4db5681715907ca92042 100644 (file)
@@ -430,6 +430,16 @@ struct afs_volume {
        u8                      name[AFS_MAXVOLNAME + 1]; /* NUL-padded volume name */
 };
 
+enum afs_lock_state {
+       AFS_VNODE_LOCK_NONE,            /* The vnode has no lock on the server */
+       AFS_VNODE_LOCK_WAITING_FOR_CB,  /* We're waiting for the server to break the callback */
+       AFS_VNODE_LOCK_SETTING,         /* We're asking the server for a lock */
+       AFS_VNODE_LOCK_GRANTED,         /* We have a lock on the server */
+       AFS_VNODE_LOCK_EXTENDING,       /* We're extending a lock on the server */
+       AFS_VNODE_LOCK_NEED_UNLOCK,     /* We need to unlock on the server */
+       AFS_VNODE_LOCK_UNLOCKING,       /* We're telling the server to unlock */
+};
+
 /*
  * AFS inode private data
  */
@@ -454,18 +464,16 @@ struct afs_vnode {
 #define AFS_VNODE_ZAP_DATA     3               /* set if vnode's data should be invalidated */
 #define AFS_VNODE_DELETED      4               /* set if vnode deleted on server */
 #define AFS_VNODE_MOUNTPOINT   5               /* set if vnode is a mountpoint symlink */
-#define AFS_VNODE_LOCKING      6               /* set if waiting for lock on vnode */
-#define AFS_VNODE_READLOCKED   7               /* set if vnode is read-locked on the server */
-#define AFS_VNODE_WRITELOCKED  8               /* set if vnode is write-locked on the server */
-#define AFS_VNODE_UNLOCKING    9               /* set if vnode is being unlocked on the server */
-#define AFS_VNODE_AUTOCELL     10              /* set if Vnode is an auto mount point */
-#define AFS_VNODE_PSEUDODIR    11              /* set if Vnode is a pseudo directory */
+#define AFS_VNODE_AUTOCELL     6               /* set if Vnode is an auto mount point */
+#define AFS_VNODE_PSEUDODIR    7               /* set if Vnode is a pseudo directory */
 
        struct list_head        wb_keys;        /* List of keys available for writeback */
        struct list_head        pending_locks;  /* locks waiting to be granted */
        struct list_head        granted_locks;  /* locks granted on this file */
        struct delayed_work     lock_work;      /* work to be done in locking */
-       struct key              *unlock_key;    /* key to be used in unlocking */
+       struct key              *lock_key;      /* Key to be used in lock ops */
+       enum afs_lock_state     lock_state : 8;
+       afs_lock_type_t         lock_type : 8;
 
        /* outstanding callback notification on this file */
        struct afs_cb_interest  *cb_interest;   /* Server on which this resides */
@@ -843,6 +851,7 @@ extern void afs_clear_permits(struct afs_vnode *);
 extern void afs_cache_permit(struct afs_vnode *, struct key *, unsigned int);
 extern void afs_zap_permits(struct rcu_head *);
 extern struct key *afs_request_key(struct afs_cell *);
+extern int afs_check_permit(struct afs_vnode *, struct key *, afs_access_t *);
 extern int afs_permission(struct inode *, int);
 extern void __exit afs_clean_up_permit_cache(void);
 
index e728ca1776c9b36640dd8ad69178e5c0d07a0a29..d04511fb3879748cf7e6e98c5c53a807f90007ba 100644 (file)
@@ -46,8 +46,7 @@ bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode
                return false;
        }
 
-       if (test_bit(AFS_VNODE_READLOCKED, &vnode->flags) ||
-           test_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
+       if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
                fc->flags |= AFS_FS_CURSOR_CUR_ONLY;
        return true;
 }
@@ -117,7 +116,7 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
        case VSALVAGING:        m = "being salvaged";   break;
        default:                m = "busy";             break;
        }
-       
+
        pr_notice("kAFS: Volume %u '%s' is %s\n", volume->vid, volume->name, m);
 }
 
@@ -438,24 +437,67 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
 
        _enter("");
 
-       if (!cbi) {
-               fc->ac.error = -ESTALE;
+       switch (fc->ac.error) {
+       case SHRT_MAX:
+               if (!cbi) {
+                       fc->ac.error = -ESTALE;
+                       fc->flags |= AFS_FS_CURSOR_STOP;
+                       return false;
+               }
+
+               fc->cbi = afs_get_cb_interest(vnode->cb_interest);
+
+               read_lock(&cbi->server->fs_lock);
+               alist = rcu_dereference_protected(cbi->server->addresses,
+                                                 lockdep_is_held(&cbi->server->fs_lock));
+               afs_get_addrlist(alist);
+               read_unlock(&cbi->server->fs_lock);
+               if (!alist) {
+                       fc->ac.error = -ESTALE;
+                       fc->flags |= AFS_FS_CURSOR_STOP;
+                       return false;
+               }
+
+               fc->ac.alist = alist;
+               fc->ac.addr  = NULL;
+               fc->ac.start = READ_ONCE(alist->index);
+               fc->ac.index = fc->ac.start;
+               fc->ac.error = 0;
+               fc->ac.begun = false;
+               goto iterate_address;
+
+       case 0:
+       default:
+               /* Success or local failure.  Stop. */
                fc->flags |= AFS_FS_CURSOR_STOP;
+               _leave(" = f [okay/local %d]", fc->ac.error);
                return false;
-       }
 
-       read_lock(&cbi->server->fs_lock);
-       alist = afs_get_addrlist(cbi->server->addresses);
-       read_unlock(&cbi->server->fs_lock);
-       if (!alist) {
-               fc->ac.error = -ESTALE;
+       case -ECONNABORTED:
                fc->flags |= AFS_FS_CURSOR_STOP;
+               _leave(" = f [abort]");
                return false;
+
+       case -ENETUNREACH:
+       case -EHOSTUNREACH:
+       case -ECONNREFUSED:
+       case -ETIMEDOUT:
+       case -ETIME:
+               _debug("no conn");
+               goto iterate_address;
        }
 
-       fc->ac.alist = alist;
-       fc->ac.error = 0;
-       return true;
+iterate_address:
+       /* Iterate over the current server's address list to try and find an
+        * address on which it will respond to us.
+        */
+       if (afs_iterate_addresses(&fc->ac)) {
+               _leave(" = t");
+               return true;
+       }
+
+       afs_end_cursor(&fc->ac);
+       return false;
 }
 
 /*
index 46a881a4d08f6a48a2cb463ea332a14709dbeb47..2b00097101b37bdfcf8fd5e4780245ddf8f09c39 100644 (file)
@@ -284,8 +284,8 @@ someone_else_changed_it:
  * permitted to be accessed with this authorisation, and if so, what access it
  * is granted
  */
-static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
-                           afs_access_t *_access)
+int afs_check_permit(struct afs_vnode *vnode, struct key *key,
+                    afs_access_t *_access)
 {
        struct afs_permits *permits;
        bool valid = false;
index 26bad7032bbaec8878941e3f9c492b44e7c58e24..0ab3f84578390e1edebb13c7748ef0c9dc9369c5 100644 (file)
@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
 {
        int i;
 
-       if (refcount_dec_and_test(&slist->usage)) {
+       if (slist && refcount_dec_and_test(&slist->usage)) {
                for (i = 0; i < slist->nr_servers; i++) {
                        afs_put_cb_interest(net, slist->servers[i].cb_interest);
                        afs_put_server(net, slist->servers[i].server);
index 875b5eb02242a0ae29b008b149770f4f6e1d3985..d3f97da61bdfc6b006b88a92a21237dcea333b62 100644 (file)
@@ -496,10 +496,10 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
                if (ret < 0)
                        goto error_sb;
                as = NULL;
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
        } else {
                _debug("reuse");
-               ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
+               ASSERTCMP(sb->s_flags, &, SB_ACTIVE);
                afs_destroy_sbi(as);
                as = NULL;
        }
index 18e46e31523ccc0295a35c058baa88d0ebf960ed..cb5f8a3df5773cba37c292e65ac6e985b0af11bc 100644 (file)
@@ -119,6 +119,11 @@ try_again:
        }
 
        if (f != t) {
+               if (PageWriteback(page)) {
+                       trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
+                                            page->index, priv);
+                       goto flush_conflicting_write;
+               }
                if (to < f || from > t)
                        goto flush_conflicting_write;
                if (from < f)
index d79ced9258614010128dd8f1ed6a0cff2b525f21..82e8f6edfb48d0e8670dd58e3fbdcfb4b5ceb85d 100644 (file)
@@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
                pr_debug("waiting for mount name=%pd\n", path->dentry);
                status = autofs4_wait(sbi, path, NFY_MOUNT);
                pr_debug("mount wait done status=%d\n", status);
-               ino->last_used = jiffies;
        }
+       ino->last_used = jiffies;
        return status;
 }
 
@@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
         */
        if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
                struct dentry *parent = dentry->d_parent;
+               struct autofs_info *ino;
                struct dentry *new;
 
                new = d_lookup(parent, &dentry->d_name);
                if (!new)
                        return NULL;
-               if (new == dentry)
-                       dput(new);
-               else {
-                       struct autofs_info *ino;
-
-                       ino = autofs4_dentry_ino(new);
-                       ino->last_used = jiffies;
-                       dput(path->dentry);
-                       path->dentry = new;
-               }
+               ino = autofs4_dentry_ino(new);
+               ino->last_used = jiffies;
+               dput(path->dentry);
+               path->dentry = new;
        }
        return path->dentry;
 }
index 4ac49d038bf38f3745888e82214491e64aa6da9a..8fc41705c7cd50af4c53f71851d8a4136673411b 100644 (file)
@@ -81,7 +81,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }
 
-       return (bytes > 0);
+       /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
+       return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
 }
 
 static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
@@ -95,6 +96,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
        } pkt;
        struct file *pipe = NULL;
        size_t pktsz;
+       int ret;
 
        pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
                 (unsigned long) wq->wait_queue_token,
@@ -169,7 +171,17 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
        mutex_unlock(&sbi->wq_mutex);
 
-       if (autofs4_write(sbi, pipe, &pkt, pktsz))
+       switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
+       case 0:
+               break;
+       case -ENOMEM:
+       case -ERESTARTSYS:
+               /* Just fail this one */
+               autofs4_wait_release(sbi, wq->wait_queue_token, ret);
+               break;
+       default:
                autofs4_catatonic_mode(sbi);
+               break;
+       }
        fput(pipe);
 }
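With the first hunk above, autofs4_write() now reports 0 or a negative errno instead of a boolean, which is what lets the notify path distinguish transient failures from a dead daemon pipe. The convention, as it reads from these hunks:

/* autofs4_write() == 0             whole packet written to the daemon pipe
 * autofs4_write() == -ENOMEM       transient: fail only this wait
 * autofs4_write() == -ERESTARTSYS  interrupted: fail only this wait
 * any other negative value         pipe looks dead: go catatonic
 */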
 
index 75a461cfaca620656b0d03c0e01b2eb4a8792ad6..16f2dfe8c2f742e4264bf60bc96562d5a6e8292d 100644 (file)
@@ -365,7 +365,7 @@ Version 0.4 (2001-10-28)
        (fs/befs/super.c)
 
 * Tell the kernel to only mount befs read-only. 
-       By setting the MS_RDONLY flag in befs_read_super().
+       By setting the SB_RDONLY flag in befs_read_super().
        Not that it was possible to write before. But now the kernel won't even try.
        (fs/befs/super.c)
 
index a92355cc453bf6cb09016e7f30fdba677c44bb48..ee236231cafac001ff6db87f5a94f515540e2adc 100644 (file)
@@ -841,7 +841,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
        if (!sb_rdonly(sb)) {
                befs_warning(sb,
                             "No write support. Marking filesystem read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /*
@@ -948,7 +948,7 @@ static int
 befs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                return -EINVAL;
        return 0;
 }
index b35ce16b3df3c6550a69289077eb922bcd5d76cb..5982c8a71f02fde26b84fc54067dd075a993f8e6 100644 (file)
@@ -295,7 +295,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                 unsigned long len, u64 disk_start,
                                 unsigned long compressed_len,
                                 struct page **compressed_pages,
-                                unsigned long nr_pages)
+                                unsigned long nr_pages,
+                                unsigned int write_flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio *bio = NULL;
@@ -327,7 +328,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
        bdev = fs_info->fs_devices->latest_bdev;
 
        bio = btrfs_bio_alloc(bdev, first_byte);
-       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+       bio->bi_opf = REQ_OP_WRITE | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
        refcount_set(&cb->pending_bios, 1);
@@ -374,7 +375,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                        bio_put(bio);
 
                        bio = btrfs_bio_alloc(bdev, first_byte);
-                       bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+                       bio->bi_opf = REQ_OP_WRITE | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -1528,5 +1529,5 @@ unsigned int btrfs_compress_str2level(const char *str)
        if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
                return str[5] - '0';
 
-       return 0;
+       return BTRFS_ZLIB_DEFAULT_LEVEL;
 }
index da20755ebf2183f1800dce91feab4d10b84793d8..0868cc554f145a7e20b9a0bf281e0990af6ee9e2 100644 (file)
@@ -34,6 +34,8 @@
 /* Maximum size of data before compression */
 #define BTRFS_MAX_UNCOMPRESSED         (SZ_128K)
 
+#define        BTRFS_ZLIB_DEFAULT_LEVEL                3
+
 struct compressed_bio {
        /* number of bios pending for this compressed extent */
        refcount_t pending_bios;
@@ -91,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long len, u64 disk_start,
                                  unsigned long compressed_len,
                                  struct page **compressed_pages,
-                                 unsigned long nr_pages);
+                                 unsigned long nr_pages,
+                                 unsigned int write_flags);
 blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags);
 
index f7df5536ab61e1f6de0512328341c5a637d040d3..13c260b525a1282aded755e5bb0141458ec52452 100644 (file)
@@ -2957,7 +2957,7 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
  */
 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
 {
-       return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
+       return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
 }
 
 static inline void free_fs_info(struct btrfs_fs_info *fs_info)
@@ -3180,6 +3180,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
                               int nr);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+                             unsigned int extra_bits,
                              struct extent_state **cached_state, int dedupe);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
                             struct btrfs_root *new_root,
index efce9a2fa9be09e47f29095b58ff8d2cd355eb12..10a2a579cc7f6a3569212444fb37dd5ddcf0c0c7 100644 (file)
@@ -610,7 +610,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
-       if (found_level == 0 && btrfs_check_leaf(root, eb)) {
+       if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }
@@ -3848,7 +3848,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
                                         buf->len,
                                         fs_info->dirty_metadata_batch);
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-       if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(root, buf)) {
+       /*
+        * Since btrfs_mark_buffer_dirty() can be called with item pointer set
+        * but item data not updated.
+        * So here we should only check item pointers, not item data.
+        */
+       if (btrfs_header_level(buf) == 0 &&
+           btrfs_check_leaf_relaxed(root, buf)) {
                btrfs_print_leaf(buf);
                ASSERT(0);
        }
index 7208ecef70889833ac2caa7d3d5d8b4b634a4ee0..4497f937e8fb7ce608f8ef7cc8db1c7a51bcd2a3 100644 (file)
@@ -3502,13 +3502,6 @@ again:
                goto again;
        }
 
-       /* We've already setup this transaction, go ahead and exit */
-       if (block_group->cache_generation == trans->transid &&
-           i_size_read(inode)) {
-               dcs = BTRFS_DC_SETUP;
-               goto out_put;
-       }
-
        /*
         * We want to set the generation to 0, that way if anything goes wrong
         * from here on out we know not to trust this cache when we load up next
@@ -3532,6 +3525,13 @@ again:
        }
        WARN_ON(ret);
 
+       /* We've already setup this transaction, go ahead and exit */
+       if (block_group->cache_generation == trans->transid &&
+           i_size_read(inode)) {
+               dcs = BTRFS_DC_SETUP;
+               goto out_put;
+       }
+
        if (i_size_read(inode) > 0) {
                ret = btrfs_check_trunc_cache_free_space(fs_info,
                                        &fs_info->global_block_rsv);
index 16045ea86fc13ef6858289c4db8cfb8ed5469d7c..012d63870b99acfc180ef0cd05fb337e39730959 100644 (file)
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
        struct btrfs_bio *bbio = NULL;
        int ret;
 
-       ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
+       ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
        BUG_ON(!mirror_num);
 
        bio = btrfs_io_bio_alloc(1);
@@ -3253,7 +3253,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
                                               delalloc_start,
                                               delalloc_end,
                                               &page_started,
-                                              nr_written);
+                                              nr_written, wbc);
                /* File system has been set read-only */
                if (ret) {
                        SetPageError(page);
index 4a8861379d3ef23ef49dfc1b418bbd22243402de..93dcae0c3183009c3668dfa34b449051c7576719 100644 (file)
@@ -116,7 +116,8 @@ struct extent_io_ops {
         */
        int (*fill_delalloc)(void *private_data, struct page *locked_page,
                             u64 start, u64 end, int *page_started,
-                            unsigned long *nr_written);
+                            unsigned long *nr_written,
+                            struct writeback_control *wbc);
 
        int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
        void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -365,10 +366,11 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                       struct extent_state **cached_state);
 
 static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
-               u64 end, struct extent_state **cached_state)
+                                     u64 end, unsigned int extra_bits,
+                                     struct extent_state **cached_state)
 {
        return set_extent_bit(tree, start, end,
-                             EXTENT_DELALLOC | EXTENT_UPTODATE,
+                             EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
                              NULL, cached_state, GFP_NOFS);
 }
 
index f80254d82f409bedc91bbef14364726beeea174c..eb1bac7c8553c7a4172735027765bf16619e9d00 100644 (file)
@@ -477,6 +477,47 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
        }
 }
 
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+                                        const u64 start,
+                                        const u64 len,
+                                        struct extent_state **cached_state)
+{
+       u64 search_start = start;
+       const u64 end = start + len - 1;
+
+       while (search_start < end) {
+               const u64 search_len = end - search_start + 1;
+               struct extent_map *em;
+               u64 em_len;
+               int ret = 0;
+
+               em = btrfs_get_extent(inode, NULL, 0, search_start,
+                                     search_len, 0);
+               if (IS_ERR(em))
+                       return PTR_ERR(em);
+
+               if (em->block_start != EXTENT_MAP_HOLE)
+                       goto next;
+
+               em_len = em->len;
+               if (em->start < search_start)
+                       em_len -= search_start - em->start;
+               if (em_len > search_len)
+                       em_len = search_len;
+
+               ret = set_extent_bit(&inode->io_tree, search_start,
+                                    search_start + em_len - 1,
+                                    EXTENT_DELALLOC_NEW,
+                                    NULL, cached_state, GFP_NOFS);
+next:
+               search_start = extent_map_end(em);
+               free_extent_map(em);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
 /*
  * after copy_from_user, pages need to be dirtied and we need to make
  * sure holes are created between the current EOF and the start of
@@ -497,14 +538,34 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);
+       unsigned int extra_bits = 0;
 
        start_pos = pos & ~((u64) fs_info->sectorsize - 1);
        num_bytes = round_up(write_bytes + pos - start_pos,
                             fs_info->sectorsize);
 
        end_of_last_block = start_pos + num_bytes - 1;
+
+       if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
+               if (start_pos >= isize &&
+                   !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
+                       /*
+                        * There can't be any extents following eof in this case
+                        * so just set the delalloc new bit for the range
+                        * directly.
+                        */
+                       extra_bits |= EXTENT_DELALLOC_NEW;
+               } else {
+                       err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
+                                                           start_pos,
+                                                           num_bytes, cached);
+                       if (err)
+                               return err;
+               }
+       }
+
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-                                       cached, 0);
+                                       extra_bits, cached, 0);
        if (err)
                return err;
 
@@ -1404,47 +1465,6 @@ fail:
 
 }
 
-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
-                                        const u64 start,
-                                        const u64 len,
-                                        struct extent_state **cached_state)
-{
-       u64 search_start = start;
-       const u64 end = start + len - 1;
-
-       while (search_start < end) {
-               const u64 search_len = end - search_start + 1;
-               struct extent_map *em;
-               u64 em_len;
-               int ret = 0;
-
-               em = btrfs_get_extent(inode, NULL, 0, search_start,
-                                     search_len, 0);
-               if (IS_ERR(em))
-                       return PTR_ERR(em);
-
-               if (em->block_start != EXTENT_MAP_HOLE)
-                       goto next;
-
-               em_len = em->len;
-               if (em->start < search_start)
-                       em_len -= search_start - em->start;
-               if (em_len > search_len)
-                       em_len = search_len;
-
-               ret = set_extent_bit(&inode->io_tree, search_start,
-                                    search_start + em_len - 1,
-                                    EXTENT_DELALLOC_NEW,
-                                    NULL, cached_state, GFP_NOFS);
-next:
-               search_start = extent_map_end(em);
-               free_extent_map(em);
-               if (ret)
-                       return ret;
-       }
-       return 0;
-}
-
 /*
  * This function locks the extent and properly waits for data=ordered extents
  * to finish before allowing the pages to be modified if need.
@@ -1473,10 +1493,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                + round_up(pos + write_bytes - start_pos,
                           fs_info->sectorsize) - 1;
 
-       if (start_pos < inode->vfs_inode.i_size ||
-           (inode->flags & BTRFS_INODE_PREALLOC)) {
+       if (start_pos < inode->vfs_inode.i_size) {
                struct btrfs_ordered_extent *ordered;
-               unsigned int clear_bits;
 
                lock_extent_bits(&inode->io_tree, start_pos, last_pos,
                                cached_state);
@@ -1498,19 +1516,10 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
-               ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
-                                                   last_pos - start_pos + 1,
-                                                   cached_state);
-               clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
-                       EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
-               if (ret)
-                       clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
-               clear_extent_bit(&inode->io_tree, start_pos,
-                                last_pos, clear_bits,
-                                (clear_bits & EXTENT_LOCKED) ? 1 : 0,
-                                0, cached_state, GFP_NOFS);
-               if (ret)
-                       return ret;
+               clear_extent_bit(&inode->io_tree, start_pos, last_pos,
+                                EXTENT_DIRTY | EXTENT_DELALLOC |
+                                EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+                                0, 0, cached_state, GFP_NOFS);
                *lockstart = start_pos;
                *lockend = last_pos;
                ret = 1;
@@ -2048,6 +2057,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        len = (u64)end - (u64)start + 1;
        trace_btrfs_sync_file(file, datasync);
 
+       btrfs_init_log_ctx(&ctx, inode);
+
        /*
         * We write the dirty pages in the range and wait until they complete
         * out of the ->i_mutex. If so, we can flush the dirty pages by
@@ -2194,8 +2205,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        }
        trans->sync = true;
 
-       btrfs_init_log_ctx(&ctx, inode);
-
        ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
        if (ret < 0) {
                /* Fallthrough and commit/free transaction. */
@@ -2253,6 +2262,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                ret = btrfs_end_transaction(trans);
        }
 out:
+       ASSERT(list_empty(&ctx.list));
        err = file_check_and_advance_wb_err(file);
        if (!ret)
                ret = err;
index cdc9f4015ec36c08688a81bb1ba2bf657a9845ef..4426d1c73e50f1d1b1105a182d9c982a28b8ff08 100644 (file)
@@ -1264,7 +1264,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        /* Lock all pages first so we can lock the extent safely. */
        ret = io_ctl_prepare_pages(io_ctl, inode, 0);
        if (ret)
-               goto out;
+               goto out_unlock;
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         &cached_state);
@@ -1358,6 +1358,7 @@ out_nospc_locked:
 out_nospc:
        cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
 
+out_unlock:
        if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
                up_write(&block_group->data_rwsem);
 
index b93fe05a39c7643247298c4b7d12eccae39b9790..993061f83067a9a65c5a58908908f5948423f11a 100644 (file)
@@ -378,6 +378,7 @@ struct async_cow {
        struct page *locked_page;
        u64 start;
        u64 end;
+       unsigned int write_flags;
        struct list_head extents;
        struct btrfs_work work;
 };
@@ -857,7 +858,8 @@ retry:
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
-                                   async_extent->nr_pages)) {
+                                   async_extent->nr_pages,
+                                   async_cow->write_flags)) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
@@ -1191,7 +1193,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
 
 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
-                               unsigned long *nr_written)
+                               unsigned long *nr_written,
+                               unsigned int write_flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct async_cow *async_cow;
@@ -1208,6 +1211,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;
+               async_cow->write_flags = write_flags;
 
                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
                    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
@@ -1577,11 +1581,13 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
  */
 static int run_delalloc_range(void *private_data, struct page *locked_page,
                              u64 start, u64 end, int *page_started,
-                             unsigned long *nr_written)
+                             unsigned long *nr_written,
+                             struct writeback_control *wbc)
 {
        struct inode *inode = private_data;
        int ret;
        int force_cow = need_force_cow(inode, start, end);
+       unsigned int write_flags = wbc_to_write_flags(wbc);
 
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
                ret = run_delalloc_nocow(inode, locked_page, start, end,
@@ -1596,7 +1602,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
                set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                        &BTRFS_I(inode)->runtime_flags);
                ret = cow_file_range_async(inode, locked_page, start, end,
-                                          page_started, nr_written);
+                                          page_started, nr_written,
+                                          write_flags);
        }
        if (ret)
                btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
@@ -2025,11 +2032,12 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+                             unsigned int extra_bits,
                              struct extent_state **cached_state, int dedupe)
 {
        WARN_ON((end & (PAGE_SIZE - 1)) == 0);
        return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
-                                  cached_state);
+                                  extra_bits, cached_state);
 }
 
 /* see btrfs_writepage_start_hook for details on why this is required */
@@ -2090,7 +2098,7 @@ again:
                goto out;
         }
 
-       btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
+       btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state,
                                  0);
        ClearPageChecked(page);
        set_page_dirty(page);
@@ -4790,7 +4798,7 @@ again:
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
+       ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
                                        &cached_state, 0);
        if (ret) {
                unlock_extent_cached(io_tree, block_start, block_end,
@@ -5438,6 +5446,14 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
                goto out_err;
 
        btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
+       if (location->type != BTRFS_INODE_ITEM_KEY &&
+           location->type != BTRFS_ROOT_ITEM_KEY) {
+               btrfs_warn(root->fs_info,
+"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
+                          __func__, name, btrfs_ino(BTRFS_I(dir)),
+                          location->objectid, location->type, location->offset);
+               goto out_err;
+       }
 out:
        btrfs_free_path(path);
        return ret;
@@ -5754,8 +5770,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
                return inode;
        }
 
-       BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
-
        index = srcu_read_lock(&fs_info->subvol_srcu);
        ret = fixup_tree_root_location(fs_info, dir, dentry,
                                       &location, &sub_root);
@@ -9150,7 +9164,7 @@ again:
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, page_start, end,
+       ret = btrfs_set_extent_delalloc(inode, page_start, end, 0,
                                        &cached_state, 0);
        if (ret) {
                unlock_extent_cached(io_tree, page_start, page_end,
index fd172a93d11a9bb531c43d7c5188631f3d44aefa..d748ad1c3620a7e99ff9706902846362a165a05a 100644 (file)
@@ -1172,7 +1172,7 @@ again:
        if (!i_done || ret)
                goto out;
 
-       if (!(inode->i_sb->s_flags & MS_ACTIVE))
+       if (!(inode->i_sb->s_flags & SB_ACTIVE))
                goto out;
 
        /*
@@ -1333,7 +1333,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                 * make sure we stop running if someone unmounts
                 * the FS
                 */
-               if (!(inode->i_sb->s_flags & MS_ACTIVE))
+               if (!(inode->i_sb->s_flags & SB_ACTIVE))
                        break;
 
                if (btrfs_defrag_cancelled(fs_info)) {
index 4cf2eb67eba6ceceeae466c69bf4b5dc188a7339..f0c3f00e97cbe76e1fa8484efc8933842856c8d5 100644 (file)
@@ -3268,7 +3268,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        nr++;
                }
 
-               btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
+               btrfs_set_extent_delalloc(inode, page_start, page_end, 0, NULL,
+                                         0);
                set_page_dirty(page);
 
                unlock_extent(&BTRFS_I(inode)->io_tree,
index c10e4c70f02d15b2bbaba994f406d7a0a27fbd70..20d3300bd26896a905502b0ba5b5a0372a59e849 100644 (file)
@@ -3521,7 +3521,40 @@ out:
 }
 
 /*
- * Check if ino ino1 is an ancestor of inode ino2 in the given root.
+ * Check if inode ino2, or any of its ancestors, is inode ino1.
+ * Return 1 if true, 0 if false and < 0 on error.
+ */
+static int check_ino_in_path(struct btrfs_root *root,
+                            const u64 ino1,
+                            const u64 ino1_gen,
+                            const u64 ino2,
+                            const u64 ino2_gen,
+                            struct fs_path *fs_path)
+{
+       u64 ino = ino2;
+
+       if (ino1 == ino2)
+               return ino1_gen == ino2_gen;
+
+       while (ino > BTRFS_FIRST_FREE_OBJECTID) {
+               u64 parent;
+               u64 parent_gen;
+               int ret;
+
+               fs_path_reset(fs_path);
+               ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
+               if (ret < 0)
+                       return ret;
+               if (parent == ino1)
+                       return parent_gen == ino1_gen;
+               ino = parent;
+       }
+       return 0;
+}
+
+/*
+ * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
+ * possible path (in case ino2 is not a directory and has multiple hard links).
  * Return 1 if true, 0 if false and < 0 on error.
  */
 static int is_ancestor(struct btrfs_root *root,
@@ -3530,36 +3563,91 @@ static int is_ancestor(struct btrfs_root *root,
                       const u64 ino2,
                       struct fs_path *fs_path)
 {
-       u64 ino = ino2;
-       bool free_path = false;
+       bool free_fs_path = false;
        int ret = 0;
+       struct btrfs_path *path = NULL;
+       struct btrfs_key key;
 
        if (!fs_path) {
                fs_path = fs_path_alloc();
                if (!fs_path)
                        return -ENOMEM;
-               free_path = true;
+               free_fs_path = true;
        }
 
-       while (ino > BTRFS_FIRST_FREE_OBJECTID) {
-               u64 parent;
-               u64 parent_gen;
+       path = alloc_path_for_send();
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
-               fs_path_reset(fs_path);
-               ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
-               if (ret < 0) {
-                       if (ret == -ENOENT && ino == ino2)
-                               ret = 0;
-                       goto out;
+       key.objectid = ino2;
+       key.type = BTRFS_INODE_REF_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+
+       while (true) {
+               struct extent_buffer *leaf = path->nodes[0];
+               int slot = path->slots[0];
+               u32 cur_offset = 0;
+               u32 item_size;
+
+               if (slot >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto out;
+                       if (ret > 0)
+                               break;
+                       continue;
                }
-               if (parent == ino1) {
-                       ret = parent_gen == ino1_gen ? 1 : 0;
-                       goto out;
+
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+               if (key.objectid != ino2)
+                       break;
+               if (key.type != BTRFS_INODE_REF_KEY &&
+                   key.type != BTRFS_INODE_EXTREF_KEY)
+                       break;
+
+               item_size = btrfs_item_size_nr(leaf, slot);
+               while (cur_offset < item_size) {
+                       u64 parent;
+                       u64 parent_gen;
+
+                       if (key.type == BTRFS_INODE_EXTREF_KEY) {
+                               unsigned long ptr;
+                               struct btrfs_inode_extref *extref;
+
+                               ptr = btrfs_item_ptr_offset(leaf, slot);
+                               extref = (struct btrfs_inode_extref *)
+                                       (ptr + cur_offset);
+                               parent = btrfs_inode_extref_parent(leaf,
+                                                                  extref);
+                               cur_offset += sizeof(*extref);
+                               cur_offset += btrfs_inode_extref_name_len(leaf,
+                                                                 extref);
+                       } else {
+                               parent = key.offset;
+                               cur_offset = item_size;
+                       }
+
+                       ret = get_inode_info(root, parent, NULL, &parent_gen,
+                                            NULL, NULL, NULL, NULL);
+                       if (ret < 0)
+                               goto out;
+                       ret = check_ino_in_path(root, ino1, ino1_gen,
+                                               parent, parent_gen, fs_path);
+                       if (ret)
+                               goto out;
                }
-               ino = parent;
+               path->slots[0]++;
        }
+       ret = 0;
  out:
-       if (free_path)
+       btrfs_free_path(path);
+       if (free_fs_path)
                fs_path_free(fs_path);
        return ret;
 }
index 65af029559b58a793961623412010eee71f80d83..3a4dce1536455416a4f609f99ec416b61811f592 100644 (file)
@@ -107,7 +107,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
                return;
 
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                btrfs_info(fs_info, "forced readonly");
                /*
                 * Note that a running device replace operation is not
@@ -137,7 +137,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
 
        /*
         * Special case: if the error is EROFS, and we're already
-        * under MS_RDONLY, then it is safe here.
+        * under SB_RDONLY, then it is safe here.
         */
        if (errno == -EROFS && sb_rdonly(sb))
                return;
@@ -168,7 +168,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
        set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 
        /* Don't go through full error handling during mount */
-       if (sb->s_flags & MS_BORN)
+       if (sb->s_flags & SB_BORN)
                btrfs_handle_error(fs_info);
 }
 
@@ -507,9 +507,18 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                            token == Opt_compress_force ||
                            strncmp(args[0].from, "zlib", 4) == 0) {
                                compress_type = "zlib";
+
                                info->compress_type = BTRFS_COMPRESS_ZLIB;
-                               info->compress_level =
-                                       btrfs_compress_str2level(args[0].from);
+                               info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
+                               /*
+                                * args[0] contains uninitialized data since
+                                * for these tokens we don't expect any
+                                * parameter.
+                                */
+                               if (token != Opt_compress &&
+                                   token != Opt_compress_force)
+                                       info->compress_level =
+                                         btrfs_compress_str2level(args[0].from);
                                btrfs_set_opt(info->mount_opt, COMPRESS);
                                btrfs_clear_opt(info->mount_opt, NODATACOW);
                                btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -625,7 +634,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        break;
                case Opt_acl:
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
-                       info->sb->s_flags |= MS_POSIXACL;
+                       info->sb->s_flags |= SB_POSIXACL;
                        break;
 #else
                        btrfs_err(info, "support for ACL not compiled in!");
@@ -633,7 +642,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        goto out;
 #endif
                case Opt_noacl:
-                       info->sb->s_flags &= ~MS_POSIXACL;
+                       info->sb->s_flags &= ~SB_POSIXACL;
                        break;
                case Opt_notreelog:
                        btrfs_set_and_info(info, NOTREELOG,
@@ -851,7 +860,7 @@ check:
        /*
         * Extra check for current option against current flag
         */
-       if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
+       if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
                btrfs_err(info,
                          "nologreplay must be used with ro mount option");
                ret = -EINVAL;
@@ -1147,7 +1156,7 @@ static int btrfs_fill_super(struct super_block *sb,
        sb->s_xattr = btrfs_xattr_handlers;
        sb->s_time_gran = 1;
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        sb->s_flags |= SB_I_VERSION;
        sb->s_iflags |= SB_I_CGROUPWB;
@@ -1180,7 +1189,7 @@ static int btrfs_fill_super(struct super_block *sb,
        }
 
        cleancache_init_fs(sb);
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
        return 0;
 
 fail_close:
@@ -1277,7 +1286,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
                seq_puts(seq, ",flushoncommit");
        if (btrfs_test_opt(info, DISCARD))
                seq_puts(seq, ",discard");
-       if (!(info->sb->s_flags & MS_POSIXACL))
+       if (!(info->sb->s_flags & SB_POSIXACL))
                seq_puts(seq, ",noacl");
        if (btrfs_test_opt(info, SPACE_CACHE))
                seq_puts(seq, ",space_cache");
@@ -1409,11 +1418,11 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
 
        mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs);
        if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) {
-               if (flags & MS_RDONLY) {
-                       mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY,
+               if (flags & SB_RDONLY) {
+                       mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~SB_RDONLY,
                                             device_name, newargs);
                } else {
-                       mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY,
+                       mnt = vfs_kern_mount(&btrfs_fs_type, flags | SB_RDONLY,
                                             device_name, newargs);
                        if (IS_ERR(mnt)) {
                                root = ERR_CAST(mnt);
@@ -1565,7 +1574,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        u64 subvol_objectid = 0;
        int error = 0;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        error = btrfs_parse_early_options(data, mode, fs_type,
@@ -1619,13 +1628,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (error)
                goto error_fs_info;
 
-       if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
+       if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
                error = -EACCES;
                goto error_close_devices;
        }
 
        bdev = fs_devices->latest_bdev;
-       s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC,
+       s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
                 fs_info);
        if (IS_ERR(s)) {
                error = PTR_ERR(s);
@@ -1635,7 +1644,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (s->s_root) {
                btrfs_close_devices(fs_devices);
                free_fs_info(fs_info);
-               if ((flags ^ s->s_flags) & MS_RDONLY)
+               if ((flags ^ s->s_flags) & SB_RDONLY)
                        error = -EBUSY;
        } else {
                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
@@ -1702,11 +1711,11 @@ static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
 {
        if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
            (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
-            (flags & MS_RDONLY))) {
+            (flags & SB_RDONLY))) {
                /* wait for any defraggers to finish */
                wait_event(fs_info->transaction_wait,
                           (atomic_read(&fs_info->defrag_running) == 0));
-               if (flags & MS_RDONLY)
+               if (flags & SB_RDONLY)
                        sync_filesystem(fs_info->sb);
        }
 }
@@ -1766,10 +1775,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        btrfs_resize_thread_pool(fs_info,
                fs_info->thread_pool_size, old_thread_pool_size);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out;
 
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                /*
                 * this also happens on 'umount -rf' or on shutdown, when
                 * the filesystem is busy.
@@ -1781,10 +1790,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                /* avoid complains from lockdep et al. */
                up(&fs_info->uuid_tree_rescan_sem);
 
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
                /*
-                * Setting MS_RDONLY will put the cleaner thread to
+                * Setting SB_RDONLY will put the cleaner thread to
                 * sleep at the next loop if it's already active.
                 * If it's already asleep, we'll leave unused block
                 * groups on disk until we're mounted read-write again
@@ -1856,7 +1865,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                                goto restore;
                        }
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                set_bit(BTRFS_FS_OPEN, &fs_info->flags);
        }
@@ -1866,9 +1875,9 @@ out:
        return 0;
 
 restore:
-       /* We've hit an error - don't reset MS_RDONLY */
+       /* We've hit an error - don't reset SB_RDONLY */
        if (sb_rdonly(sb))
-               old_flags |= MS_RDONLY;
+               old_flags |= SB_RDONLY;
        sb->s_flags = old_flags;
        fs_info->mount_opt = old_opts;
        fs_info->compress_type = old_compress_type;
index d06b1c931d05b8ef2b06c8dbc26d7657c92010c7..2e7f64a3b22b7d55d0bc6abfccd09171ef943351 100644 (file)
@@ -114,7 +114,7 @@ static int test_find_delalloc(u32 sectorsize)
         * |--- delalloc ---|
         * |---  search  ---|
         */
-       set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
+       set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -145,7 +145,7 @@ static int test_find_delalloc(u32 sectorsize)
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
        }
-       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
+       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -200,7 +200,7 @@ static int test_find_delalloc(u32 sectorsize)
         *
         * We are re-using our test_start from above since it works out well.
         */
-       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
+       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
index f797642c013dadc24f5391fd148bcdc6d320c563..30affb60da514848ef8fb7621a48e629e893feb3 100644 (file)
@@ -968,7 +968,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        btrfs_test_inode_set_ops(inode);
 
        /* [BTRFS_MAX_EXTENT_SIZE] */
-       ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
+       ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 0,
                                        NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -984,7 +984,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
                                        BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
-                                       NULL, 0);
+                                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1018,7 +1018,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
                                        (BTRFS_MAX_EXTENT_SIZE >> 1)
                                        + sectorsize - 1,
-                                       NULL, 0);
+                                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1036,7 +1036,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
                        (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
-                       NULL, 0);
+                       0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1053,7 +1053,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
        */
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + sectorsize,
-                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1089,7 +1089,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
         */
        ret = btrfs_set_extent_delalloc(inode,
                        BTRFS_MAX_EXTENT_SIZE + sectorsize,
-                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
index 114fc5f0ecc5efb30e2c16c6e1501ed1b6c3bca2..ce4ed6ec8f39276c7c7a5d3551b043a9fd8c08b4 100644 (file)
@@ -242,7 +242,8 @@ static int check_leaf_item(struct btrfs_root *root,
        return ret;
 }
 
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
+static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf,
+                     bool check_item_data)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        /* No valid key type is 0, so all key should be larger than this key */
@@ -361,10 +362,15 @@ int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
                        return -EUCLEAN;
                }
 
-               /* Check if the item size and content meet other criteria */
-               ret = check_leaf_item(root, leaf, &key, slot);
-               if (ret < 0)
-                       return ret;
+               if (check_item_data) {
+                       /*
+                        * Check if the item size and content meet other
+                        * criteria
+                        */
+                       ret = check_leaf_item(root, leaf, &key, slot);
+                       if (ret < 0)
+                               return ret;
+               }
 
                prev_key.objectid = key.objectid;
                prev_key.type = key.type;
@@ -374,6 +380,17 @@ int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
        return 0;
 }
 
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf)
+{
+       return check_leaf(root, leaf, true);
+}
+
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+                            struct extent_buffer *leaf)
+{
+       return check_leaf(root, leaf, false);
+}
+
 int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
 {
        unsigned long nr = btrfs_header_nritems(node);
index 96c486e95d7042eaaa6476eeb98ada600cf2afe6..3d53e8d6fda0ca8312dd2477d9dd36fc3ba245b8 100644 (file)
 #include "ctree.h"
 #include "extent_io.h"
 
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf);
+/*
+ * Comprehensive leaf checker.
+ * Will check not only the item pointers, but also every possible member
+ * in item data.
+ */
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
+
+/*
+ * Less strict leaf checker.
+ * Will only check item pointers, not reading item data.
+ */
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+                            struct extent_buffer *leaf);
 int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
 
 #endif
index aa7c71cff575a5a3e1d73b8194239936cc509877..7bf9b31561db14ec7159fd0b7479e6bdee149735 100644 (file)
@@ -4102,7 +4102,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
 
        if (ordered_io_err) {
                ctx->io_err = -EIO;
-               return 0;
+               return ctx->io_err;
        }
 
        btrfs_init_map_token(&token);
index f1ecb938ba4d71b4a83c1be50bf5880bc86add38..49810b70afd3941721246497d94c754ec2120619 100644 (file)
@@ -189,6 +189,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
        }
        kfree(fs_devices);
@@ -578,6 +579,7 @@ static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
                                fs_devs->num_devices--;
                                list_del(&dev->dev_list);
                                rcu_string_free(dev->name);
+                               bio_put(dev->flush_bio);
                                kfree(dev);
                        }
                        break;
@@ -630,6 +632,7 @@ static noinline int device_list_add(const char *path,
 
                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
+                       bio_put(device->flush_bio);
                        kfree(device);
                        return -ENOMEM;
                }
@@ -742,6 +745,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                        name = rcu_string_strdup(orig_dev->name->str,
                                        GFP_KERNEL);
                        if (!name) {
+                               bio_put(device->flush_bio);
                                kfree(device);
                                goto error;
                        }
@@ -807,6 +811,7 @@ again:
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
        }
 
@@ -1750,20 +1755,24 @@ static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
        key.offset = device->devid;
 
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-       if (ret < 0)
-               goto out;
-
-       if (ret > 0) {
-               ret = -ENOENT;
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
                goto out;
        }
 
        ret = btrfs_del_item(trans, root, path);
-       if (ret)
-               goto out;
+       if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+       }
+
 out:
        btrfs_free_path(path);
-       btrfs_commit_transaction(trans);
+       if (!ret)
+               ret = btrfs_commit_transaction(trans);
        return ret;
 }
 
@@ -1993,7 +2002,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
        fs_devices = srcdev->fs_devices;
 
        list_del_rcu(&srcdev->dev_list);
-       list_del_rcu(&srcdev->dev_alloc_list);
+       list_del(&srcdev->dev_alloc_list);
        fs_devices->num_devices--;
        if (srcdev->missing)
                fs_devices->missing_devices--;
@@ -2349,6 +2358,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
 
        name = rcu_string_strdup(device_path, GFP_KERNEL);
        if (!name) {
+               bio_put(device->flush_bio);
                kfree(device);
                ret = -ENOMEM;
                goto error;
@@ -2358,6 +2368,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                rcu_string_free(device->name);
+               bio_put(device->flush_bio);
                kfree(device);
                ret = PTR_ERR(trans);
                goto error;
@@ -2384,7 +2395,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
 
        if (seeding_dev) {
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
                ret = btrfs_prepare_sprout(fs_info);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
@@ -2497,10 +2508,11 @@ error_sysfs:
        btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
 error_trans:
        if (seeding_dev)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        if (trans)
                btrfs_end_transaction(trans);
        rcu_string_free(device->name);
+       bio_put(device->flush_bio);
        kfree(device);
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -2567,6 +2579,7 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 
        name = rcu_string_strdup(device_path, GFP_KERNEL);
        if (!name) {
+               bio_put(device->flush_bio);
                kfree(device);
                ret = -ENOMEM;
                goto error;
@@ -6284,6 +6297,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
 
                ret = find_next_devid(fs_info, &tmp);
                if (ret) {
+                       bio_put(dev->flush_bio);
                        kfree(dev);
                        return ERR_PTR(ret);
                }
index ff5d32cf9578f77aa28bebd2634fd61cb3d27064..a14b2c974c9eacea27943fe7a995e21ce4e1bb3a 100644 (file)
@@ -1160,7 +1160,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
        struct cap_msg_args arg;
-       int held, revoking, dropping;
+       int held, revoking;
        int wake = 0;
        int delayed = 0;
        int ret;
@@ -1168,7 +1168,6 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        held = cap->issued | cap->implemented;
        revoking = cap->implemented & ~cap->issued;
        retain &= ~revoking;
-       dropping = cap->issued & ~retain;
 
        dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
             inode, cap, cap->session,
@@ -1712,7 +1711,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
 
        /* if we are unmounting, flush any unused caps immediately. */
        if (mdsc->stopping)
-               is_delayed = 1;
+               is_delayed = true;
 
        spin_lock(&ci->i_ceph_lock);
 
@@ -3189,8 +3188,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
        int dirty = le32_to_cpu(m->dirty);
        int cleaned = 0;
        bool drop = false;
-       bool wake_ci = 0;
-       bool wake_mdsc = 0;
+       bool wake_ci = false;
+       bool wake_mdsc = false;
 
        list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
                if (cf->tid == flush_tid)
index f2550a076edc4e65da6e36354ef9ba2ba517d184..ab81652198c48e1e90a5545cb06089a6fa30da1a 100644 (file)
@@ -493,6 +493,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        ci->i_wb_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
+       atomic_set(&ci->i_filelock_ref, 0);
        ci->i_shared_gen = 0;
        ci->i_rdcache_gen = 0;
        ci->i_rdcache_revoking = 0;
@@ -786,7 +787,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 
        /* update inode */
        ci->i_version = le64_to_cpu(info->version);
-       inode->i_version++;
        inode->i_rdev = le32_to_cpu(info->rdev);
        inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
 
@@ -1185,6 +1185,7 @@ retry_lookup:
                                    ceph_snap(d_inode(dn)) != tvino.snap)) {
                                dout(" dn %p points to wrong inode %p\n",
                                     dn, d_inode(dn));
+                               ceph_dir_clear_ordered(dir);
                                d_delete(dn);
                                dput(dn);
                                goto retry_lookup;
@@ -1322,6 +1323,7 @@ retry_lookup:
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
                             dn, d_inode(dn), ceph_vinop(d_inode(dn)),
                             ceph_vinop(in));
+                       ceph_dir_clear_ordered(dir);
                        d_invalidate(dn);
                        have_lease = false;
                }
@@ -1573,6 +1575,7 @@ retry_lookup:
                            ceph_snap(d_inode(dn)) != tvino.snap)) {
                        dout(" dn %p points to wrong inode %p\n",
                             dn, d_inode(dn));
+                       __ceph_dir_clear_ordered(ci);
                        d_delete(dn);
                        dput(dn);
                        goto retry_lookup;
@@ -1597,7 +1600,9 @@ retry_lookup:
                                 &req->r_caps_reservation);
                if (ret < 0) {
                        pr_err("fill_inode badness on %p\n", in);
-                       if (d_really_is_negative(dn))
+                       if (d_really_is_positive(dn))
+                               __ceph_dir_clear_ordered(ci);
+                       else
                                iput(in);
                        d_drop(dn);
                        err = ret;
index e7cce412f2cf7b5362606f2286291290a218b9c8..9e66f69ee8a5ecc9e8455465f232bf70529e59a8 100644 (file)
@@ -30,19 +30,52 @@ void __init ceph_flock_init(void)
        get_random_bytes(&lock_secret, sizeof(lock_secret));
 }
 
+static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
+{
+       struct inode *inode = file_inode(src->fl_file);
+       atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+}
+
+static void ceph_fl_release_lock(struct file_lock *fl)
+{
+       struct inode *inode = file_inode(fl->fl_file);
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       if (atomic_dec_and_test(&ci->i_filelock_ref)) {
+               /* clear error when all locks are released */
+               spin_lock(&ci->i_ceph_lock);
+               ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
+               spin_unlock(&ci->i_ceph_lock);
+       }
+}
+
+static const struct file_lock_operations ceph_fl_lock_ops = {
+       .fl_copy_lock = ceph_fl_copy_lock,
+       .fl_release_private = ceph_fl_release_lock,
+};
+
 /**
  * Implement fcntl and flock locking functions.
  */
-static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
+static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
                             int cmd, u8 wait, struct file_lock *fl)
 {
-       struct inode *inode = file_inode(file);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_mds_request *req;
        int err;
        u64 length = 0;
        u64 owner;
 
+       if (operation == CEPH_MDS_OP_SETFILELOCK) {
+               /*
+                * increasing i_filelock_ref closes race window between
+                * handling request reply and adding file_lock struct to
+                * inode. Otherwise, auth caps may get trimmed in the
+                * window. Caller function will decrease the counter.
+                */
+               fl->fl_ops = &ceph_fl_lock_ops;
+               atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+       }
+
        if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
                wait = 0;
 
@@ -180,10 +213,12 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
  */
 int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 {
-       u8 lock_cmd;
-       int err;
-       u8 wait = 0;
+       struct inode *inode = file_inode(file);
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       int err = 0;
        u16 op = CEPH_MDS_OP_SETFILELOCK;
+       u8 wait = 0;
+       u8 lock_cmd;
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
@@ -199,6 +234,26 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
        else if (IS_SETLKW(cmd))
                wait = 1;
 
+       spin_lock(&ci->i_ceph_lock);
+       if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
+               err = -EIO;
+       } else if (op == CEPH_MDS_OP_SETFILELOCK) {
+               /*
+                * increasing i_filelock_ref closes race window between
+                * handling request reply and adding file_lock struct to
+                * inode. Otherwise, i_auth_cap may get trimmed in the
+                * window. Caller function will decrease the counter.
+                */
+               fl->fl_ops = &ceph_fl_lock_ops;
+               atomic_inc(&ci->i_filelock_ref);
+       }
+       spin_unlock(&ci->i_ceph_lock);
+       if (err < 0) {
+               if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
+                       posix_lock_file(file, fl, NULL);
+               return err;
+       }
+
        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
@@ -206,16 +261,16 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
        else
                lock_cmd = CEPH_LOCK_UNLOCK;
 
-       err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl);
+       err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
        if (!err) {
-               if (op != CEPH_MDS_OP_GETFILELOCK) {
+               if (op == CEPH_MDS_OP_SETFILELOCK) {
                        dout("mds locked, locking locally");
                        err = posix_lock_file(file, fl, NULL);
-                       if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
+                       if (err) {
                                /* undo! This should only happen if
                                 * the kernel detects local
                                 * deadlock. */
-                               ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
+                               ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
                                                  CEPH_LOCK_UNLOCK, 0, fl);
                                dout("got %d on posix_lock_file, undid lock",
                                     err);
@@ -227,9 +282,11 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 
 int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
 {
-       u8 lock_cmd;
-       int err;
+       struct inode *inode = file_inode(file);
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       int err = 0;
        u8 wait = 0;
+       u8 lock_cmd;
 
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
@@ -239,6 +296,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
 
        dout("ceph_flock, fl_file: %p", fl->fl_file);
 
+       spin_lock(&ci->i_ceph_lock);
+       if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
+               err = -EIO;
+       } else {
+               /* see comment in ceph_lock */
+               fl->fl_ops = &ceph_fl_lock_ops;
+               atomic_inc(&ci->i_filelock_ref);
+       }
+       spin_unlock(&ci->i_ceph_lock);
+       if (err < 0) {
+               if (F_UNLCK == fl->fl_type)
+                       locks_lock_file_wait(file, fl);
+               return err;
+       }
+
        if (IS_SETLKW(cmd))
                wait = 1;
 
@@ -250,13 +322,13 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
                lock_cmd = CEPH_LOCK_UNLOCK;
 
        err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
-                               file, lock_cmd, wait, fl);
+                               inode, lock_cmd, wait, fl);
        if (!err) {
                err = locks_lock_file_wait(file, fl);
                if (err) {
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
-                                         file, CEPH_LOCK_UNLOCK, 0, fl);
+                                         inode, CEPH_LOCK_UNLOCK, 0, fl);
                        dout("got %d on locks_lock_file_wait, undid lock", err);
                }
        }
@@ -288,6 +360,37 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
             *flock_count, *fcntl_count);
 }
 
+/*
+ * Given a pointer to a lock, convert it to a ceph filelock
+ */
+static int lock_to_ceph_filelock(struct file_lock *lock,
+                                struct ceph_filelock *cephlock)
+{
+       int err = 0;
+       cephlock->start = cpu_to_le64(lock->fl_start);
+       cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
+       cephlock->client = cpu_to_le64(0);
+       cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
+       cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));
+
+       switch (lock->fl_type) {
+       case F_RDLCK:
+               cephlock->type = CEPH_LOCK_SHARED;
+               break;
+       case F_WRLCK:
+               cephlock->type = CEPH_LOCK_EXCL;
+               break;
+       case F_UNLCK:
+               cephlock->type = CEPH_LOCK_UNLOCK;
+               break;
+       default:
+               dout("Have unknown lock type %d", lock->fl_type);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
 /**
  * Encode the flock and fcntl locks for the given inode into the ceph_filelock
  * array. Must be called with inode->i_lock already held.
@@ -356,50 +459,22 @@ int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
        if (err)
                goto out_fail;
 
-       err = ceph_pagelist_append(pagelist, flocks,
-                                  num_fcntl_locks * sizeof(*flocks));
-       if (err)
-               goto out_fail;
+       if (num_fcntl_locks > 0) {
+               err = ceph_pagelist_append(pagelist, flocks,
+                                          num_fcntl_locks * sizeof(*flocks));
+               if (err)
+                       goto out_fail;
+       }
 
        nlocks = cpu_to_le32(num_flock_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;
 
-       err = ceph_pagelist_append(pagelist,
-                                  &flocks[num_fcntl_locks],
-                                  num_flock_locks * sizeof(*flocks));
-out_fail:
-       return err;
-}
-
-/*
- * Given a pointer to a lock, convert it to a ceph filelock
- */
-int lock_to_ceph_filelock(struct file_lock *lock,
-                         struct ceph_filelock *cephlock)
-{
-       int err = 0;
-       cephlock->start = cpu_to_le64(lock->fl_start);
-       cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
-       cephlock->client = cpu_to_le64(0);
-       cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
-       cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));
-
-       switch (lock->fl_type) {
-       case F_RDLCK:
-               cephlock->type = CEPH_LOCK_SHARED;
-               break;
-       case F_WRLCK:
-               cephlock->type = CEPH_LOCK_EXCL;
-               break;
-       case F_UNLCK:
-               cephlock->type = CEPH_LOCK_UNLOCK;
-               break;
-       default:
-               dout("Have unknown lock type %d", lock->fl_type);
-               err = -EINVAL;
+       if (num_flock_locks > 0) {
+               err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks],
+                                          num_flock_locks * sizeof(*flocks));
        }
-
+out_fail:
        return err;
 }
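
The locks.c hunks above give every POSIX/flock lock on a ceph inode a reference on i_filelock_ref (taken in ceph_lock_message()/ceph_lock()/ceph_flock() and via fl_copy_lock) and clear CEPH_I_ERROR_FILELOCK only when the last lock is released. The following stand-alone user-space sketch is illustrative only: model_inode, lock_copied and lock_released are invented names, and the real code updates the flag under i_ceph_lock rather than relying on the atomic alone.

    /*
     * Minimal user-space model (not kernel code) of the i_filelock_ref
     * pattern: every copy of a file lock takes a reference, every release
     * drops one, and the sticky error flag is cleared only when the last
     * lock goes away.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct model_inode {
            atomic_int filelock_ref;        /* models ci->i_filelock_ref */
            bool error_filelock;            /* models CEPH_I_ERROR_FILELOCK */
    };

    static void lock_copied(struct model_inode *mi)
    {
            atomic_fetch_add(&mi->filelock_ref, 1);
    }

    static void lock_released(struct model_inode *mi)
    {
            /* previous value 1 means this was the last reference */
            if (atomic_fetch_sub(&mi->filelock_ref, 1) == 1)
                    mi->error_filelock = false;
    }

    int main(void)
    {
            struct model_inode mi;

            atomic_init(&mi.filelock_ref, 1);       /* one lock already held */
            mi.error_filelock = true;               /* a prior MDS failure */

            lock_copied(&mi);       /* e.g. the lock is split into two ranges */
            lock_released(&mi);
            lock_released(&mi);     /* last reference: error flag is cleared */
            printf("refs=%d error=%d\n",
                   atomic_load(&mi.filelock_ref), (int)mi.error_filelock);
            return 0;
    }

Tying the flag reset to the final put keeps -EIO sticky for exactly as long as any pre-existing lock is still visible to user space.
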
index 0687ab3c32674d863213186dc4554d17ad219bf3..ab69dcb70e8ae342733f589338c02dc226f95356 100644 (file)
@@ -1039,22 +1039,23 @@ void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
  * session caps
  */
 
-/* caller holds s_cap_lock, we drop it */
-static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
-                                struct ceph_mds_session *session)
-       __releases(session->s_cap_lock)
+static void detach_cap_releases(struct ceph_mds_session *session,
+                               struct list_head *target)
 {
-       LIST_HEAD(tmp_list);
-       list_splice_init(&session->s_cap_releases, &tmp_list);
+       lockdep_assert_held(&session->s_cap_lock);
+
+       list_splice_init(&session->s_cap_releases, target);
        session->s_num_cap_releases = 0;
-       spin_unlock(&session->s_cap_lock);
+       dout("dispose_cap_releases mds%d\n", session->s_mds);
+}
 
-       dout("cleanup_cap_releases mds%d\n", session->s_mds);
-       while (!list_empty(&tmp_list)) {
+static void dispose_cap_releases(struct ceph_mds_client *mdsc,
+                                struct list_head *dispose)
+{
+       while (!list_empty(dispose)) {
                struct ceph_cap *cap;
                /* zero out the in-progress message */
-               cap = list_first_entry(&tmp_list,
-                                       struct ceph_cap, session_caps);
+               cap = list_first_entry(dispose, struct ceph_cap, session_caps);
                list_del(&cap->session_caps);
                ceph_put_cap(mdsc, cap);
        }
@@ -1215,6 +1216,13 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                }
                spin_unlock(&mdsc->cap_dirty_lock);
 
+               if (atomic_read(&ci->i_filelock_ref) > 0) {
+                       /* make further file lock syscall return -EIO */
+                       ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
+                       pr_warn_ratelimited(" dropping file locks for %p %lld\n",
+                                           inode, ceph_ino(inode));
+               }
+
                if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
                        list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
                        ci->i_prealloc_cap_flush = NULL;
@@ -1244,6 +1252,8 @@ static void remove_session_caps(struct ceph_mds_session *session)
 {
        struct ceph_fs_client *fsc = session->s_mdsc->fsc;
        struct super_block *sb = fsc->sb;
+       LIST_HEAD(dispose);
+
        dout("remove_session_caps on %p\n", session);
        iterate_session_caps(session, remove_session_caps_cb, fsc);
 
@@ -1278,10 +1288,12 @@ static void remove_session_caps(struct ceph_mds_session *session)
        }
 
        // drop cap expires and unlock s_cap_lock
-       cleanup_cap_releases(session->s_mdsc, session);
+       detach_cap_releases(session, &dispose);
 
        BUG_ON(session->s_nr_caps > 0);
        BUG_ON(!list_empty(&session->s_cap_flushing));
+       spin_unlock(&session->s_cap_lock);
+       dispose_cap_releases(session->s_mdsc, &dispose);
 }
 
 /*
@@ -1462,6 +1474,11 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
                        goto out;
                if ((used | wanted) & CEPH_CAP_ANY_WR)
                        goto out;
+               /* Note: it's possible that i_filelock_ref becomes non-zero
+                * after dropping auth caps. It doesn't hurt because reply
+                * of lock mds request will re-add auth caps. */
+               if (atomic_read(&ci->i_filelock_ref) > 0)
+                       goto out;
        }
        /* The inode has cached pages, but it's no longer used.
         * we can safely drop it */
@@ -2827,7 +2844,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                struct ceph_mds_cap_reconnect v2;
                struct ceph_mds_cap_reconnect_v1 v1;
        } rec;
-       struct ceph_inode_info *ci;
+       struct ceph_inode_info *ci = cap->ci;
        struct ceph_reconnect_state *recon_state = arg;
        struct ceph_pagelist *pagelist = recon_state->pagelist;
        char *path;
@@ -2836,8 +2853,6 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        u64 snap_follows;
        struct dentry *dentry;
 
-       ci = cap->ci;
-
        dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
             inode, ceph_vinop(inode), cap, cap->cap_id,
             ceph_cap_string(cap->issued));
@@ -2870,7 +2885,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                rec.v2.issued = cpu_to_le32(cap->issued);
                rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
                rec.v2.pathbase = cpu_to_le64(pathbase);
-               rec.v2.flock_len = 0;
+               rec.v2.flock_len = (__force __le32)
+                       ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
        } else {
                rec.v1.cap_id = cpu_to_le64(cap->cap_id);
                rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
@@ -2894,26 +2910,37 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
        if (recon_state->msg_version >= 2) {
                int num_fcntl_locks, num_flock_locks;
-               struct ceph_filelock *flocks;
+               struct ceph_filelock *flocks = NULL;
                size_t struct_len, total_len = 0;
                u8 struct_v = 0;
 
 encode_again:
-               ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
-               flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
-                                sizeof(struct ceph_filelock), GFP_NOFS);
-               if (!flocks) {
-                       err = -ENOMEM;
-                       goto out_free;
+               if (rec.v2.flock_len) {
+                       ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
+               } else {
+                       num_fcntl_locks = 0;
+                       num_flock_locks = 0;
                }
-               err = ceph_encode_locks_to_buffer(inode, flocks,
-                                                 num_fcntl_locks,
-                                                 num_flock_locks);
-               if (err) {
+               if (num_fcntl_locks + num_flock_locks > 0) {
+                       flocks = kmalloc((num_fcntl_locks + num_flock_locks) *
+                                        sizeof(struct ceph_filelock), GFP_NOFS);
+                       if (!flocks) {
+                               err = -ENOMEM;
+                               goto out_free;
+                       }
+                       err = ceph_encode_locks_to_buffer(inode, flocks,
+                                                         num_fcntl_locks,
+                                                         num_flock_locks);
+                       if (err) {
+                               kfree(flocks);
+                               flocks = NULL;
+                               if (err == -ENOSPC)
+                                       goto encode_again;
+                               goto out_free;
+                       }
+               } else {
                        kfree(flocks);
-                       if (err == -ENOSPC)
-                               goto encode_again;
-                       goto out_free;
+                       flocks = NULL;
                }
 
                if (recon_state->msg_version >= 3) {
@@ -2993,6 +3020,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        int s_nr_caps;
        struct ceph_pagelist *pagelist;
        struct ceph_reconnect_state recon_state;
+       LIST_HEAD(dispose);
 
        pr_info("mds%d reconnect start\n", mds);
 
@@ -3026,7 +3054,9 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
         */
        session->s_cap_reconnect = 1;
        /* drop old cap expires; we're about to reestablish that state */
-       cleanup_cap_releases(mdsc, session);
+       detach_cap_releases(session, &dispose);
+       spin_unlock(&session->s_cap_lock);
+       dispose_cap_releases(mdsc, &dispose);
 
        /* trim unused caps to reduce MDS's cache rejoin time */
        if (mdsc->fsc->sb->s_root)
@@ -3857,14 +3887,14 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
                goto err_out;
        }
        return;
+
 bad:
        pr_err("error decoding fsmap\n");
 err_out:
        mutex_lock(&mdsc->mutex);
-       mdsc->mdsmap_err = -ENOENT;
+       mdsc->mdsmap_err = err;
        __wake_requests(mdsc, &mdsc->waiting_for_map);
        mutex_unlock(&mdsc->mutex);
-       return;
 }
 
 /*
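
The mds_client.c hunks above split the old cleanup_cap_releases() (which took over dropping s_cap_lock and then freed the entries) into detach_cap_releases(), a list splice done under the lock, and dispose_cap_releases(), the actual freeing done after the caller drops the lock. A rough user-space analogue of that split follows; the names are invented and a plain mutex stands in for the spinlock.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cap { struct cap *next; };

    struct session {
            pthread_mutex_t lock;
            struct cap *releases;           /* models session->s_cap_releases */
    };

    /* caller holds session->lock: only unhook the whole list, cheap work */
    static void detach_releases(struct session *s, struct cap **out)
    {
            *out = s->releases;
            s->releases = NULL;
    }

    /* no locks held: the heavy work (freeing) happens out here */
    static void dispose_releases(struct cap *list)
    {
            while (list) {
                    struct cap *cap = list;

                    list = cap->next;
                    free(cap);
            }
    }

    int main(void)
    {
            struct session s = { .lock = PTHREAD_MUTEX_INITIALIZER, .releases = NULL };
            struct cap *dispose;
            int i;

            for (i = 0; i < 3; i++) {       /* queue a few pending releases */
                    struct cap *cap = calloc(1, sizeof(*cap));

                    cap->next = s.releases;
                    s.releases = cap;
            }

            pthread_mutex_lock(&s.lock);
            detach_releases(&s, &dispose);
            pthread_mutex_unlock(&s.lock);
            dispose_releases(dispose);

            printf("remaining=%p\n", (void *)s.releases);
            return 0;
    }
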
index e4082afedcb15a447ee7fd344aa2a3def391d43c..a62d2a9841dc2b0487181155373c03eac60f8a02 100644 (file)
@@ -84,8 +84,9 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_ffree = -1;
        buf->f_namelen = NAME_MAX;
 
-       /* leave fsid little-endian, regardless of host endianness */
-       fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
+       /* Must convert the fsid, for consistent values across arches */
+       fsid = le64_to_cpu(*(__le64 *)(&monmap->fsid)) ^
+              le64_to_cpu(*((__le64 *)&monmap->fsid + 1));
        buf->f_fsid.val[0] = fsid & 0xffffffff;
        buf->f_fsid.val[1] = fsid >> 32;
 
@@ -330,11 +331,11 @@ static int parse_fsopt_token(char *c, void *private)
                break;
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
        case Opt_acl:
-               fsopt->sb_flags |= MS_POSIXACL;
+               fsopt->sb_flags |= SB_POSIXACL;
                break;
 #endif
        case Opt_noacl:
-               fsopt->sb_flags &= ~MS_POSIXACL;
+               fsopt->sb_flags &= ~SB_POSIXACL;
                break;
        default:
                BUG_ON(token);
@@ -519,7 +520,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",nopoolperm");
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
-       if (fsopt->sb_flags & MS_POSIXACL)
+       if (fsopt->sb_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
        else
                seq_puts(m, ",noacl");
@@ -987,7 +988,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        dout("ceph_mount\n");
 
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
-       flags |= MS_POSIXACL;
+       flags |= SB_POSIXACL;
 #endif
        err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
        if (err < 0) {
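
The ceph_statfs() hunk above replaces the raw 64-bit reads of monmap->fsid with le64_to_cpu() so that the folded f_fsid value comes out the same on big- and little-endian hosts. A stand-alone illustration of that idea follows (plain C, not fs/ceph code; le64_to_host is a hand-rolled substitute for le64_to_cpu).

    #include <stdint.h>
    #include <stdio.h>

    /* interpret 8 bytes as a little-endian value, byte 0 least significant */
    static uint64_t le64_to_host(const unsigned char *p)
    {
            uint64_t v = 0;
            int i;

            for (i = 7; i >= 0; i--)
                    v = (v << 8) | p[i];
            return v;
    }

    int main(void)
    {
            /* 16-byte fsid exactly as it appears on disk/wire */
            unsigned char fsid[16] = {
                    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                    0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
            };
            uint64_t folded = le64_to_host(fsid) ^ le64_to_host(fsid + 8);

            /* same output regardless of host endianness */
            printf("f_fsid.val[0]=0x%08x val[1]=0x%08x\n",
                   (unsigned)(folded & 0xffffffff), (unsigned)(folded >> 32));
            return 0;
    }
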
index 3e27a28aa44adfd2da34ff995ff9481ac749d043..2beeec07fa76ce199e7d461831b9b93a705419ee 100644 (file)
@@ -352,6 +352,7 @@ struct ceph_inode_info {
        int i_pin_ref;
        int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref;
        int i_wrbuffer_ref, i_wrbuffer_ref_head;
+       atomic_t i_filelock_ref;
        u32 i_shared_gen;       /* increment each time we get FILE_SHARED */
        u32 i_rdcache_gen;      /* incremented each time we get FILE_CACHE. */
        u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */
@@ -487,6 +488,8 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
 #define CEPH_I_KICK_FLUSH      (1 << 9)  /* kick flushing caps */
 #define CEPH_I_FLUSH_SNAPS     (1 << 10) /* need flush snapss */
 #define CEPH_I_ERROR_WRITE     (1 << 11) /* have seen write errors */
+#define CEPH_I_ERROR_FILELOCK  (1 << 12) /* have seen file lock errors */
+
 
 /*
  * We set the ERROR_WRITE bit when we start seeing write errors on an inode
@@ -1011,7 +1014,6 @@ extern int ceph_encode_locks_to_buffer(struct inode *inode,
 extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
                                  struct ceph_pagelist *pagelist,
                                  int num_fcntl_locks, int num_flock_locks);
-extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
 
 /* debugfs.c */
 extern int ceph_fs_debugfs_init(struct ceph_fs_client *client);
index cbd216b572390ca76e481aabf9a311e4b7749d7c..350fa55a1bf79878f9f390a3883998ce7cf198a4 100644 (file)
@@ -42,7 +42,7 @@
 #define CIFS_MOUNT_MULTIUSER   0x20000 /* multiuser mount */
 #define CIFS_MOUNT_STRICT_IO   0x40000 /* strict cache mode */
 #define CIFS_MOUNT_RWPIDFORWARD        0x80000 /* use pid forwarding for rw */
-#define CIFS_MOUNT_POSIXACL    0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
+#define CIFS_MOUNT_POSIXACL    0x100000 /* mirror of SB_POSIXACL in mnt_cifs_flags */
 #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
 #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
 #define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */
index 8c8b75d33f310ce5e258042ff489f942379cdd27..31b7565b161756e01e9b0f10cf358e5bfccab3e4 100644 (file)
@@ -125,7 +125,7 @@ cifs_read_super(struct super_block *sb)
        tcon = cifs_sb_master_tcon(cifs_sb);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
 
        if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -497,7 +497,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_puts(s, ",cifsacl");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
                seq_puts(s, ",dynperm");
-       if (root->d_sb->s_flags & MS_POSIXACL)
+       if (root->d_sb->s_flags & SB_POSIXACL)
                seq_puts(s, ",acl");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
                seq_puts(s, ",mfsymlinks");
@@ -573,7 +573,7 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
 static int cifs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return 0;
 }
 
@@ -708,7 +708,7 @@ cifs_do_mount(struct file_system_type *fs_type,
 
        rc = cifs_mount(cifs_sb, volume_info);
        if (rc) {
-               if (!(flags & MS_SILENT))
+               if (!(flags & SB_SILENT))
                        cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
                                 rc);
                root = ERR_PTR(rc);
@@ -720,7 +720,7 @@ cifs_do_mount(struct file_system_type *fs_type,
        mnt_data.flags = flags;
 
        /* BB should we make this contingent on mount parm? */
-       flags |= MS_NODIRATIME | MS_NOATIME;
+       flags |= SB_NODIRATIME | SB_NOATIME;
 
        sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
        if (IS_ERR(sb)) {
@@ -739,7 +739,7 @@ cifs_do_mount(struct file_system_type *fs_type,
                        goto out_super;
                }
 
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
        }
 
        root = cifs_get_root(volume_info, sb);
index e185b2853eab7b1116dafc7ca8aeeb6d09b10687..b16583594d1ad481d694036828cc7a718a94f37a 100644 (file)
@@ -559,8 +559,8 @@ struct smb_vol {
                         CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
                         CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
 
-#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
-                     MS_NODEV | MS_SYNCHRONOUS)
+#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
+                     SB_NODEV | SB_SYNCHRONOUS)
 
 struct cifs_mnt_data {
        struct cifs_sb_info *cifs_sb;
index 7c732cb4416411e597f2e1a4af96fd8bf7e49beb..ecb99079363ab7a85c0cdf7496f76061fa43a6e4 100644 (file)
@@ -985,7 +985,7 @@ retry_iget5_locked:
                }
 
                cifs_fattr_to_inode(inode, fattr);
-               if (sb->s_flags & MS_NOATIME)
+               if (sb->s_flags & SB_NOATIME)
                        inode->i_flags |= S_NOATIME | S_NOCMTIME;
                if (inode->i_state & I_NEW) {
                        inode->i_ino = hash;
index 52f975d848a076e6873d55b6429c0c62588ecb51..316af84674f110764a6e1245b099b71c47aa5854 100644 (file)
@@ -117,7 +117,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
 #ifdef CONFIG_CIFS_POSIX
                if (!value)
                        goto out;
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
                                value, (const int)size,
                                ACL_TYPE_ACCESS, cifs_sb->local_nls,
@@ -129,7 +129,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
 #ifdef CONFIG_CIFS_POSIX
                if (!value)
                        goto out;
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
                                value, (const int)size,
                                ACL_TYPE_DEFAULT, cifs_sb->local_nls,
@@ -266,7 +266,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
 
        case XATTR_ACL_ACCESS:
 #ifdef CONFIG_CIFS_POSIX
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
                                value, size, ACL_TYPE_ACCESS,
                                cifs_sb->local_nls,
@@ -276,7 +276,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
 
        case XATTR_ACL_DEFAULT:
 #ifdef CONFIG_CIFS_POSIX
-               if (sb->s_flags & MS_POSIXACL)
+               if (sb->s_flags & SB_POSIXACL)
                        rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
                                value, size, ACL_TYPE_DEFAULT,
                                cifs_sb->local_nls,
index 6f0a6a4d5faa95aff7b7f0e948ba7b55bed99eb7..97424cf206c08af0519d74f82057180e5f8d6248 100644 (file)
@@ -96,7 +96,7 @@ void coda_destroy_inodecache(void)
 static int coda_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
@@ -188,7 +188,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
        mutex_unlock(&vc->vc_mutex);
 
        sb->s_fs_info = vc;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
        sb->s_blocksize = 4096; /* XXXXX  what do we put here?? */
        sb->s_blocksize_bits = 12;
        sb->s_magic = CODA_SUPER_MAGIC;
index 9a2ab419ba624bbb86a5f978313e35f984627440..017b0ab19bc4d98625349ce65109f7f48f5551c8 100644 (file)
@@ -505,7 +505,7 @@ static void cramfs_kill_sb(struct super_block *sb)
 static int cramfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -592,7 +592,7 @@ static int cramfs_finalize_super(struct super_block *sb,
        struct inode *root;
 
        /* Set it all up.. */
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        sb->s_op = &cramfs_ops;
        root = get_cramfs_inode(sb, cramfs_root, 0);
        if (IS_ERR(root))
index 95981591977a04d08f300c0795fcd96a4211adc1..78b72c48374e5eed09587292f3b7eee62059e18b 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -627,7 +627,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 
                        if (pfn != pmd_pfn(*pmdp))
                                goto unlock_pmd;
-                       if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+                       if (!pmd_dirty(*pmdp)
+                                       && !pmd_access_permitted(*pmdp, WRITE))
                                goto unlock_pmd;
 
                        flush_cache_page(vma, address, pfn);
index f2677c90d96e1ea140bb908c14dcab3aac49bd7e..025d66a705db6bf41ba52ada490806780d101083 100644 (file)
@@ -560,8 +560,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
         * Set the POSIX ACL flag based on whether they're enabled in the lower
         * mount.
         */
-       s->s_flags = flags & ~MS_POSIXACL;
-       s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+       s->s_flags = flags & ~SB_POSIXACL;
+       s->s_flags |= path.dentry->d_sb->s_flags & SB_POSIXACL;
 
        /**
         * Force a read-only eCryptfs mount when:
@@ -569,7 +569,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
         *   2) The ecryptfs_encrypted_view mount option is specified
         */
        if (sb_rdonly(path.dentry->d_sb) || mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
 
        s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
        s->s_blocksize = path.dentry->d_sb->s_blocksize;
@@ -602,7 +602,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
        ecryptfs_set_dentry_private(s->s_root, root_info);
        root_info->lower_path = path;
 
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
        return dget(s->s_root);
 
 out_free:
index 65b59009555b6e3c8cc8d6f647ced0de09de3319..6ffb7ba1547a66508d3179fe3dab86362f70419d 100644 (file)
@@ -116,7 +116,7 @@ static void destroy_inodecache(void)
 static int efs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -311,7 +311,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
 #ifdef DEBUG
                pr_info("forcing read-only mode\n");
 #endif
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
        }
        s->s_op   = &efs_superblock_operations;
        s->s_export_op = &efs_export_ops;
index 396a3c075fd42049460c319c70f44a56926d7ca5..afd548ebc32820ab0888ca82cc8f2fdde2de170d 100644 (file)
@@ -276,12 +276,6 @@ static DEFINE_MUTEX(epmutex);
 /* Used to check for epoll file descriptor inclusion loops */
 static struct nested_calls poll_loop_ncalls;
 
-/* Used for safe wake up implementation */
-static struct nested_calls poll_safewake_ncalls;
-
-/* Used to call file's f_op->poll() under the nested calls boundaries */
-static struct nested_calls poll_readywalk_ncalls;
-
 /* Slab cache used to allocate "struct epitem" */
 static struct kmem_cache *epi_cache __read_mostly;
 
@@ -551,40 +545,21 @@ out_unlock:
  * this special case of epoll.
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
-                                    unsigned long events, int subclass)
+
+static struct nested_calls poll_safewake_ncalls;
+
+static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
 {
        unsigned long flags;
+       wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
 
-       spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
-       wake_up_locked_poll(wqueue, events);
+       spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
+       wake_up_locked_poll(wqueue, POLLIN);
        spin_unlock_irqrestore(&wqueue->lock, flags);
-}
-#else
-static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
-                                    unsigned long events, int subclass)
-{
-       wake_up_poll(wqueue, events);
-}
-#endif
 
-static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
-{
-       ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
-                         1 + call_nests);
        return 0;
 }
 
-/*
- * Perform a safe wake up of the poll wait list. The problem is that
- * with the new callback'd wake up system, it is possible that the
- * poll callback is reentered from inside the call to wake_up() done
- * on the poll wait queue head. The rule is that we cannot reenter the
- * wake up code from the same task more than EP_MAX_NESTS times,
- * and we cannot reenter the same wait queue head at all. This will
- * enable to have a hierarchy of epoll file descriptor of no more than
- * EP_MAX_NESTS deep.
- */
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
        int this_cpu = get_cpu();
@@ -595,6 +570,15 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
        put_cpu();
 }
 
+#else
+
+static void ep_poll_safewake(wait_queue_head_t *wq)
+{
+       wake_up_poll(wq, POLLIN);
+}
+
+#endif
+
 static void ep_remove_wait_queue(struct eppoll_entry *pwq)
 {
        wait_queue_head_t *whead;
@@ -880,11 +864,33 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static inline unsigned int ep_item_poll(struct epitem *epi, poll_table *pt)
+static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+                              void *priv);
+static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
+                                poll_table *pt);
+
+/*
+ * Differs from ep_eventpoll_poll() in that internal callers already have
+ * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
+ * is correctly annotated.
+ */
+static unsigned int ep_item_poll(struct epitem *epi, poll_table *pt, int depth)
 {
+       struct eventpoll *ep;
+       bool locked;
+
        pt->_key = epi->event.events;
+       if (!is_file_epoll(epi->ffd.file))
+               return epi->ffd.file->f_op->poll(epi->ffd.file, pt) &
+                      epi->event.events;
+
+       ep = epi->ffd.file->private_data;
+       poll_wait(epi->ffd.file, &ep->poll_wait, pt);
+       locked = pt && (pt->_qproc == ep_ptable_queue_proc);
 
-       return epi->ffd.file->f_op->poll(epi->ffd.file, pt) & epi->event.events;
+       return ep_scan_ready_list(epi->ffd.file->private_data,
+                                 ep_read_events_proc, &depth, depth,
+                                 locked) & epi->event.events;
 }
 
 static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
@@ -892,13 +898,15 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 {
        struct epitem *epi, *tmp;
        poll_table pt;
+       int depth = *(int *)priv;
 
        init_poll_funcptr(&pt, NULL);
+       depth++;
 
        list_for_each_entry_safe(epi, tmp, head, rdllink) {
-               if (ep_item_poll(epi, &pt))
+               if (ep_item_poll(epi, &pt, depth)) {
                        return POLLIN | POLLRDNORM;
-               else {
+               } else {
                        /*
                         * Item has been dropped into the ready list by the poll
                         * callback, but it's not actually ready, as far as
@@ -912,48 +920,20 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
        return 0;
 }
 
-static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
-                                poll_table *pt);
-
-struct readyevents_arg {
-       struct eventpoll *ep;
-       bool locked;
-};
-
-static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
-{
-       struct readyevents_arg *arg = priv;
-
-       return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL,
-                                 call_nests + 1, arg->locked);
-}
-
 static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
-       int pollflags;
        struct eventpoll *ep = file->private_data;
-       struct readyevents_arg arg;
-
-       /*
-        * During ep_insert() we already hold the ep->mtx for the tfile.
-        * Prevent re-aquisition.
-        */
-       arg.locked = wait && (wait->_qproc == ep_ptable_queue_proc);
-       arg.ep = ep;
+       int depth = 0;
 
        /* Insert inside our poll wait queue */
        poll_wait(file, &ep->poll_wait, wait);
 
        /*
         * Proceed to find out if wanted events are really available inside
-        * the ready list. This need to be done under ep_call_nested()
-        * supervision, since the call to f_op->poll() done on listed files
-        * could re-enter here.
+        * the ready list.
         */
-       pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
-                                  ep_poll_readyevents_proc, &arg, ep, current);
-
-       return pollflags != -1 ? pollflags : 0;
+       return ep_scan_ready_list(ep, ep_read_events_proc,
+                                 &depth, depth, false);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -1472,7 +1452,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
         * this operation completes, the poll callback can start hitting
         * the new item.
         */
-       revents = ep_item_poll(epi, &epq.pt);
+       revents = ep_item_poll(epi, &epq.pt, 1);
 
        /*
         * We have to check if something went wrong during the poll wait queue
@@ -1606,7 +1586,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
         * Get current event bits. We can safely use the file* here because
         * its usage count has been increased by the caller of this function.
         */
-       revents = ep_item_poll(epi, &pt);
+       revents = ep_item_poll(epi, &pt, 1);
 
        /*
         * If the item is "hot" and it is not registered inside the ready
@@ -1674,7 +1654,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 
                list_del_init(&epi->rdllink);
 
-               revents = ep_item_poll(epi, &pt);
+               revents = ep_item_poll(epi, &pt, 1);
 
                /*
                 * If the event mask intersect the caller-requested one,
@@ -2313,11 +2293,10 @@ static int __init eventpoll_init(void)
         */
        ep_nested_calls_init(&poll_loop_ncalls);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /* Initialize the structure used to perform safe poll wait head wake ups */
        ep_nested_calls_init(&poll_safewake_ncalls);
-
-       /* Initialize the structure used to perform file's f_op->poll() calls */
-       ep_nested_calls_init(&poll_readywalk_ncalls);
+#endif
 
        /*
         * We can have many thousands of epitems, so prevent this from
@@ -2327,11 +2306,11 @@ static int __init eventpoll_init(void)
 
        /* Allocates slab cache used to allocate "struct epitem" items */
        epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
-                       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+                       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
 
        /* Allocates slab cache used to allocate "struct eppoll_entry" */
        pwq_cache = kmem_cache_create("eventpoll_pwq",
-                       sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);
+               sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
 
        return 0;
 }
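
The eventpoll.c changes above drop the poll_readywalk_ncalls machinery and instead thread an explicit depth through ep_item_poll()/ep_read_events_proc(), so a nested epoll file descriptor is walked by plain recursion with the nesting level known at every step (the kernel feeds that depth to mutex_lock_nested() for lockdep). A toy model of the recursion follows; the types are invented and the depth cap exists only to keep the model finite, since the real loop prevention happens at EPOLL_CTL_ADD time.

    #include <stdio.h>

    #define MAX_DEPTH 4     /* arbitrary cap for the toy model */

    struct ep;
    struct item {
            struct ep *nested;      /* non-NULL if the item is another epoll fd */
            int ready;
    };

    struct ep {
            struct item *items;
            int nitems;
    };

    static int ep_item_ready(struct item *it, int depth);

    /* scan the ready list of one epoll instance at a given nesting depth */
    static int ep_scan_ready(struct ep *ep, int depth)
    {
            int i;

            if (depth > MAX_DEPTH)
                    return 0;
            for (i = 0; i < ep->nitems; i++)
                    if (ep_item_ready(&ep->items[i], depth))
                            return 1;
            return 0;
    }

    static int ep_item_ready(struct item *it, int depth)
    {
            if (!it->nested)
                    return it->ready;
            /* recurse into the nested epoll instance one level deeper */
            return ep_scan_ready(it->nested, depth + 1);
    }

    int main(void)
    {
            struct item leaf = { .nested = NULL, .ready = 1 };
            struct ep inner = { .items = &leaf, .nitems = 1 };
            struct item outer_item = { .nested = &inner };
            struct ep outer = { .items = &outer_item, .nitems = 1 };

            printf("ready=%d\n", ep_scan_ready(&outer, 0));
            return 0;
    }
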
index 1d6243d9f2b653e679165099be9332776805b8bd..6be2aa0ab26fe26cb37032b99bba656f8d7c6b51 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1340,10 +1340,15 @@ void setup_new_exec(struct linux_binprm * bprm)
                 * avoid bad behavior from the prior rlimits. This has to
                 * happen before arch_pick_mmap_layout(), which examines
                 * RLIMIT_STACK, but after the point of no return to avoid
-                * needing to clean up the change on failure.
+                * races from other threads changing the limits. This also
+                * must be protected from races with prlimit() calls.
                 */
+               task_lock(current->group_leader);
                if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
                        current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
+               if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
+                       current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
+               task_unlock(current->group_leader);
        }
 
        arch_pick_mmap_layout(current->mm);
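
The exec.c hunk above clamps both the soft and the hard RLIMIT_STACK value under task_lock() so a racing prlimit() cannot observe or reintroduce an oversized limit during a privileged exec. A trivial user-space analogue of the clamp follows (hypothetical names; a pthread mutex stands in for task_lock).

    #include <pthread.h>
    #include <stdio.h>

    #define STK_LIM (8UL * 1024 * 1024)     /* stand-in for _STK_LIM */

    struct limits {
            pthread_mutex_t lock;
            unsigned long stack_cur, stack_max;
    };

    /* cap both values inside one critical section */
    static void clamp_stack_limits(struct limits *l)
    {
            pthread_mutex_lock(&l->lock);
            if (l->stack_cur > STK_LIM)
                    l->stack_cur = STK_LIM;
            if (l->stack_max > STK_LIM)
                    l->stack_max = STK_LIM;
            pthread_mutex_unlock(&l->lock);
    }

    int main(void)
    {
            struct limits l = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .stack_cur = 64UL * 1024 * 1024,
                    .stack_max = 128UL * 1024 * 1024,
            };

            clamp_stack_limits(&l);
            printf("cur=%lu max=%lu\n", l.stack_cur, l.stack_max);
            return 0;
    }
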
index e1b3724bebf23bb78622f81aa3a085f4d304e532..33db13365c5eb8c52265218a327f302dbac2fed5 100644 (file)
@@ -548,7 +548,7 @@ do_more:
        }
 
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        group_adjust_blocks(sb, block_group, desc, bh2, group_freed);
@@ -1424,7 +1424,7 @@ allocated:
        percpu_counter_sub(&sbi->s_freeblocks_counter, num);
 
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        *errp = 0;
index a1fc3dabca41b979db18ec1688d2cda0307a5680..6484199b35d1ec1bb63879593e5bb20f0af2bb77 100644 (file)
@@ -145,7 +145,7 @@ void ext2_free_inode (struct inode * inode)
        else
                ext2_release_inode(sb, block_group, is_directory);
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
 
        brelse(bitmap_bh);
@@ -517,7 +517,7 @@ repeat_in_this_group:
        goto fail;
 got:
        mark_buffer_dirty(bitmap_bh);
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);
        brelse(bitmap_bh);
 
index e2b6be03e69b5aee2987c13bdfac9055e66d9064..7646818ab266ff81b86003b28f661fdbd7d9ba2f 100644 (file)
@@ -75,7 +75,7 @@ void ext2_error(struct super_block *sb, const char *function,
        if (test_opt(sb, ERRORS_RO)) {
                ext2_msg(sb, KERN_CRIT,
                             "error: remounting filesystem read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 }
 
@@ -656,7 +656,7 @@ static int ext2_setup_super (struct super_block * sb,
                ext2_msg(sb, KERN_ERR,
                        "error: revision level too high, "
                        "forcing read-only mode");
-               res = MS_RDONLY;
+               res = SB_RDONLY;
        }
        if (read_only)
                return res;
@@ -924,9 +924,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_resuid = opts.s_resuid;
        sbi->s_resgid = opts.s_resgid;
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
                ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
-                MS_POSIXACL : 0);
+                SB_POSIXACL : 0);
        sb->s_iflags |= SB_I_CGROUPWB;
 
        if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
@@ -1178,7 +1178,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
                ext2_msg(sb, KERN_WARNING,
                        "warning: mounting ext3 filesystem as ext2");
        if (ext2_setup_super (sb, es, sb_rdonly(sb)))
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        ext2_write_super(sb);
        return 0;
 
@@ -1341,9 +1341,9 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
                         "dax flag with busy inodes while remounting");
                new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
        }
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out_set;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
                    !(sbi->s_mount_state & EXT2_VALID_FS))
                        goto out_set;
@@ -1379,7 +1379,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
                 */
                sbi->s_mount_state = le16_to_cpu(es->s_state);
                if (!ext2_setup_super (sb, es, 0))
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
                spin_unlock(&sbi->s_lock);
 
                ext2_write_super(sb);
@@ -1392,8 +1392,8 @@ out_set:
        sbi->s_mount_opt = new_opts.s_mount_opt;
        sbi->s_resuid = new_opts.s_resuid;
        sbi->s_resgid = new_opts.s_resgid;
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
        spin_unlock(&sbi->s_lock);
 
        return 0;
index 0992d76f7ab15b94b12014d312d164666f226870..7df2c5644e59c9678e9379985338f972a69d42d1 100644 (file)
@@ -2742,7 +2742,7 @@ static int ext4_writepages(struct address_space *mapping,
         * If the filesystem has aborted, it is read-only, so return
         * right away instead of dumping stack traces later on that
         * will obscure the real source of the problem.  We test
-        * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
+        * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
         * the latter could be true if the filesystem is mounted
         * read-only, and in that case, ext4_writepages should
         * *never* be called, so if that ever happens, we would want
@@ -5183,7 +5183,7 @@ static int ext4_do_update_inode(handle_t *handle,
 
        ext4_inode_csum_set(inode, raw_inode, ei);
        spin_unlock(&ei->i_raw_lock);
-       if (inode->i_sb->s_flags & MS_LAZYTIME)
+       if (inode->i_sb->s_flags & SB_LAZYTIME)
                ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
                                              bh->b_data);
 
index 0556cd036b69ebc4c06e8497216182b8144198c1..7c46693a14d763d53b84eec9602f4a854918bb4d 100644 (file)
@@ -422,7 +422,7 @@ static void ext4_handle_error(struct super_block *sb)
                 * before ->s_flags update
                 */
                smp_wmb();
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
@@ -635,7 +635,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
                 * before ->s_flags update
                 */
                smp_wmb();
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
@@ -1682,10 +1682,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                sb->s_flags |= SB_I_VERSION;
                return 1;
        case Opt_lazytime:
-               sb->s_flags |= MS_LAZYTIME;
+               sb->s_flags |= SB_LAZYTIME;
                return 1;
        case Opt_nolazytime:
-               sb->s_flags &= ~MS_LAZYTIME;
+               sb->s_flags &= ~SB_LAZYTIME;
                return 1;
        }
 
@@ -2116,7 +2116,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
        if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
                ext4_msg(sb, KERN_ERR, "revision level too high, "
                         "forcing read-only mode");
-               res = MS_RDONLY;
+               res = SB_RDONLY;
        }
        if (read_only)
                goto done;
@@ -2429,7 +2429,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
 
        if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
                /* don't clear list on RO mount w/ errors */
-               if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
+               if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
                        ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
                                  "clearing orphan list.\n");
                        es->s_last_orphan = 0;
@@ -2438,19 +2438,19 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                return;
        }
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
        }
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sb->s_flags |= MS_ACTIVE;
+       sb->s_flags |= SB_ACTIVE;
 
        /*
         * Turn on quotas which were not enabled for read-only mounts if
         * filesystem has quota feature, so that they are updated correctly.
         */
-       if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+       if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
                int ret = ext4_enable_quotas(sb);
 
                if (!ret)
@@ -2539,7 +2539,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                }
        }
 #endif
-       sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 }
 
 /*
@@ -2741,7 +2741,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
 
        if (ext4_has_feature_readonly(sb)) {
                ext4_msg(sb, KERN_INFO, "filesystem is read-only");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                return 1;
        }
 
@@ -3623,8 +3623,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                sb->s_iflags |= SB_I_CGROUPWB;
        }
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 
        if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
            (ext4_has_compat_features(sb) ||
@@ -4199,7 +4199,7 @@ no_journal:
        }
 
        if (ext4_setup_super(sb, es, sb_rdonly(sb)))
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
        /* determine the minimum size of new large inodes, if present */
        if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
@@ -4693,7 +4693,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
         * the clock is set in the future, and this will cause e2fsck
         * to complain and force a full file system check.
         */
-       if (!(sb->s_flags & MS_RDONLY))
+       if (!(sb->s_flags & SB_RDONLY))
                es->s_wtime = cpu_to_le32(get_seconds());
        if (sb->s_bdev->bd_part)
                es->s_kbytes_written =
@@ -5047,8 +5047,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
                ext4_abort(sb, "Abort forced by user");
 
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 
        es = sbi->s_es;
 
@@ -5057,16 +5057,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
        }
 
-       if (*flags & MS_LAZYTIME)
-               sb->s_flags |= MS_LAZYTIME;
+       if (*flags & SB_LAZYTIME)
+               sb->s_flags |= SB_LAZYTIME;
 
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
                if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
                        err = -EROFS;
                        goto restore_opts;
                }
 
-               if (*flags & MS_RDONLY) {
+               if (*flags & SB_RDONLY) {
                        err = sync_filesystem(sb);
                        if (err < 0)
                                goto restore_opts;
@@ -5078,7 +5078,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                         * First of all, the unconditional stuff we have to do
                         * to disable replay of the journal when we next remount
                         */
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
 
                        /*
                         * OK, test if we are remounting a valid rw partition
@@ -5140,7 +5140,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                ext4_clear_journal_err(sb, es);
                        sbi->s_mount_state = le16_to_cpu(es->s_state);
                        if (!ext4_setup_super(sb, es, 0))
-                               sb->s_flags &= ~MS_RDONLY;
+                               sb->s_flags &= ~SB_RDONLY;
                        if (ext4_has_feature_mmp(sb))
                                if (ext4_multi_mount_protect(sb,
                                                le64_to_cpu(es->s_mmp_block))) {
@@ -5164,7 +5164,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 
        ext4_setup_system_zone(sb);
-       if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
+       if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY))
                ext4_commit_super(sb, 1);
 
 #ifdef CONFIG_QUOTA
@@ -5182,7 +5182,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 #endif
 
-       *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
+       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
        ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
        kfree(orig_data);
        return 0;
index dd2e73e10857a33428c6bfd35eba738d4d232e45..4aa69bc1c70af31fbad8fe0f37ef5a007ada5893 100644 (file)
@@ -617,17 +617,17 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
        if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
                return 0;
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sbi->sb->s_flags &= ~MS_RDONLY;
+               sbi->sb->s_flags &= ~SB_RDONLY;
        }
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sbi->sb->s_flags |= MS_ACTIVE;
+       sbi->sb->s_flags |= SB_ACTIVE;
 
        /* Turn on quotas so that they are updated correctly */
-       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
 #endif
 
        start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -658,7 +658,7 @@ out:
        if (quota_enabled)
                f2fs_quota_off_umount(sbi->sb);
 #endif
-       sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 
        return err;
 }
index f4e094e816c63df79bd40f62b097147826037dae..6abf26c31d01885bc61abaa5625720a38a25cf6d 100644 (file)
@@ -2378,7 +2378,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
 
 static inline int f2fs_readonly(struct super_block *sb)
 {
-       return sb->s_flags & MS_RDONLY;
+       return sb->s_flags & SB_RDONLY;
 }
 
 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
index 5d5bba462f26390512a50c4359ebc99b3b3481dc..d844dcb805703ef721a443cfa23194a298c5de23 100644 (file)
@@ -1005,7 +1005,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
 
        cpc.reason = __get_cp_reason(sbi);
 gc_more:
-       if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
+       if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
                ret = -EINVAL;
                goto stop;
        }
index 92c57ace1939b0a5d086cee4366f3f2168926c36..b3a14b0429f23c65afcd3185f138ca3e49e71760 100644 (file)
@@ -598,16 +598,16 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
        int quota_enabled;
 #endif
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
-               sbi->sb->s_flags &= ~MS_RDONLY;
+               sbi->sb->s_flags &= ~SB_RDONLY;
        }
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       sbi->sb->s_flags |= MS_ACTIVE;
+       sbi->sb->s_flags |= SB_ACTIVE;
        /* Turn on quotas so that they are updated correctly */
-       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+       quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
 #endif
 
        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
@@ -671,7 +671,7 @@ out:
        if (quota_enabled)
                f2fs_quota_off_umount(sbi->sb);
 #endif
-       sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+       sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
 
        return ret ? ret: err;
 }
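
The two f2fs recovery hunks above (orphan cleanup and fsync recovery) share a second pattern: save s_flags, temporarily drop SB_RDONLY so recovery can write, then restore the saved value on the way out. A minimal userspace sketch of that save/clear/restore flow follows; SB_RDONLY's value and the replay_journal() helper are invented for illustration.

#include <stdio.h>

/* Illustrative value; the kernel's SB_RDONLY is defined in include/linux/fs.h. */
#define SB_RDONLY (1u << 0)

/* Hypothetical recovery step that refuses to run on a read-only superblock. */
static int replay_journal(const unsigned int *s_flags)
{
	return (*s_flags & SB_RDONLY) ? -1 : 0;
}

static int recover(unsigned int *s_flags)
{
	unsigned int saved = *s_flags;	/* remember the original mount state */
	int err;

	*s_flags &= ~SB_RDONLY;		/* temporarily allow writes for recovery */
	err = replay_journal(s_flags);
	*s_flags = saved;		/* restore SB_RDONLY status, as the hunks above do */
	return err;
}

int main(void)
{
	unsigned int flags = SB_RDONLY;

	printf("recover() = %d, rdonly after = %u\n",
	       recover(&flags), !!(flags & SB_RDONLY));
	return 0;
}
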
index a6c5dd450002daa7d1c43f414d0ffbaa17ff1b04..708155d9c2e42810ed05ae7ecd953ce5ad7069de 100644 (file)
@@ -534,10 +534,10 @@ static int parse_options(struct super_block *sb, char *options)
 #endif
                        break;
                case Opt_lazytime:
-                       sb->s_flags |= MS_LAZYTIME;
+                       sb->s_flags |= SB_LAZYTIME;
                        break;
                case Opt_nolazytime:
-                       sb->s_flags &= ~MS_LAZYTIME;
+                       sb->s_flags &= ~SB_LAZYTIME;
                        break;
 #ifdef CONFIG_QUOTA
                case Opt_quota:
@@ -1168,7 +1168,7 @@ static void default_options(struct f2fs_sb_info *sbi)
        set_opt(sbi, INLINE_DENTRY);
        set_opt(sbi, EXTENT_CACHE);
        set_opt(sbi, NOHEAP);
-       sbi->sb->s_flags |= MS_LAZYTIME;
+       sbi->sb->s_flags |= SB_LAZYTIME;
        set_opt(sbi, FLUSH_MERGE);
        if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
                set_opt_mode(sbi, F2FS_MOUNT_LFS);
@@ -1236,7 +1236,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 #endif
 
        /* recover superblocks we couldn't write due to previous RO mount */
-       if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+       if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
                err = f2fs_commit_super(sbi, false);
                f2fs_msg(sb, KERN_INFO,
                        "Try to recover all the superblocks, ret: %d", err);
@@ -1255,17 +1255,17 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * Previous and new state of filesystem is RO,
         * so skip checking GC and FLUSH_MERGE conditions.
         */
-       if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
+       if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
                goto skip;
 
 #ifdef CONFIG_QUOTA
-       if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
+       if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
                err = dquot_suspend(sb, -1);
                if (err < 0)
                        goto restore_opts;
        } else {
                /* dquot_resume needs RW */
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
                if (sb_any_quota_suspended(sb)) {
                        dquot_resume(sb, -1);
                } else if (f2fs_sb_has_quota_ino(sb)) {
@@ -1288,7 +1288,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * or if background_gc = off is passed in mount
         * option. Also sync the filesystem.
         */
-       if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
+       if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
                if (sbi->gc_thread) {
                        stop_gc_thread(sbi);
                        need_restart_gc = true;
@@ -1300,7 +1300,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                need_stop_gc = true;
        }
 
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                writeback_inodes_sb(sb, WB_REASON_SYNC);
                sync_inodes_sb(sb);
 
@@ -1314,7 +1314,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
         * We stop issue flush thread if FS is mounted as RO
         * or if flush_merge is not passed in mount option.
         */
-       if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+       if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
                clear_opt(sbi, FLUSH_MERGE);
                destroy_flush_cmd_control(sbi, false);
        } else {
@@ -1329,8 +1329,8 @@ skip:
                kfree(s_qf_names[i]);
 #endif
        /* Update the POSIXACL Flag */
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
 
        return 0;
 restore_gc:
@@ -2472,8 +2472,8 @@ try_onemore:
        sb->s_export_op = &f2fs_export_ops;
        sb->s_magic = F2FS_SUPER_MAGIC;
        sb->s_time_gran = 1;
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-               (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+               (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
        memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
 
        /* init f2fs-specific super block info */
index 81cecbe6d7cf6b193a416df0c6d4b5585255ec56..b833ffeee1e1441546d9902d0c61728aae635479 100644 (file)
@@ -291,7 +291,6 @@ static int fat_parse_long(struct inode *dir, loff_t *pos,
                }
        }
 parse_long:
-       slots = 0;
        ds = (struct msdos_dir_slot *)*de;
        id = ds->id;
        if (!(id & 0x40))
index 48b2336692f9f70a3d8c230ee3a9169af5e634be..bac10de678cc9645af9eccd2584d0ffc548b9c93 100644 (file)
@@ -392,7 +392,7 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
                        memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
                        set_buffer_uptodate(c_bh);
                        mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
-                       if (sb->s_flags & MS_SYNCHRONOUS)
+                       if (sb->s_flags & SB_SYNCHRONOUS)
                                err = sync_dirty_buffer(c_bh);
                        brelse(c_bh);
                        if (err)
@@ -597,7 +597,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                }
 
                if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
-                       if (sb->s_flags & MS_SYNCHRONOUS) {
+                       if (sb->s_flags & SB_SYNCHRONOUS) {
                                err = fat_sync_bhs(bhs, nr_bhs);
                                if (err)
                                        goto error;
@@ -612,7 +612,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                fat_collect_bhs(bhs, &nr_bhs, &fatent);
        } while (cluster != FAT_ENT_EOF);
 
-       if (sb->s_flags & MS_SYNCHRONOUS) {
+       if (sb->s_flags & SB_SYNCHRONOUS) {
                err = fat_sync_bhs(bhs, nr_bhs);
                if (err)
                        goto error;
index 30c52394a7adbc4041c5331d1c10dd9b52ee4bec..20a0a89eaca589de58d70d89c9625ca9a30d0143 100644 (file)
@@ -779,14 +779,14 @@ static void __exit fat_destroy_inodecache(void)
 
 static int fat_remount(struct super_block *sb, int *flags, char *data)
 {
-       int new_rdonly;
+       bool new_rdonly;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
-       *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
+       *flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
 
        sync_filesystem(sb);
 
        /* make sure we update state on remount. */
-       new_rdonly = *flags & MS_RDONLY;
+       new_rdonly = *flags & SB_RDONLY;
        if (new_rdonly != sb_rdonly(sb)) {
                if (new_rdonly)
                        fat_set_state(sb, 0, 0);
@@ -1352,7 +1352,7 @@ out:
        if (opts->unicode_xlate)
                opts->utf8 = 0;
        if (opts->nfs == FAT_NFS_NOSTALE_RO) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                sb->s_export_op = &fat_export_ops_nostale;
        }
 
@@ -1608,7 +1608,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                return -ENOMEM;
        sb->s_fs_info = sbi;
 
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
        sb->s_magic = MSDOS_SUPER_MAGIC;
        sb->s_op = &fat_sops;
        sb->s_export_op = &fat_export_ops;
index acc3aa30ee54988bd99e172e3db2b8ae83067c31..f9bdc1e01c98e7969d0e49d21bb468bef2eaaddb 100644 (file)
@@ -33,7 +33,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
        if (opts->errors == FAT_ERRORS_PANIC)
                panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id);
        else if (opts->errors == FAT_ERRORS_RO && !sb_rdonly(sb)) {
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                fat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
        }
 }
index 7d6a105d601b52d3d5b356e94c0c7ab87a818263..d24d2758a36327d35c8eb635f835851ee8de3c78 100644 (file)
@@ -646,7 +646,7 @@ static void setup(struct super_block *sb)
 {
        MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations;
        sb->s_d_op = &msdos_dentry_operations;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
 }
 
 static int msdos_fill_super(struct super_block *sb, void *data, int silent)
index 455ce5b77e9bf9eea279dccdf4a31a2a2154cd74..f989efa051a0d52423ede82a2b049c5f2f04d03d 100644 (file)
@@ -116,7 +116,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
 static int vxfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -220,7 +220,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
        int ret = -EINVAL;
        u32 j;
 
-       sbp->s_flags |= MS_RDONLY;
+       sbp->s_flags |= SB_RDONLY;
 
        infp = kzalloc(sizeof(*infp), GFP_KERNEL);
        if (!infp) {
index 08f5debd07d10135b7a472b6e93394b681aa6228..cea4836385b72ee16f1a6d0f8b347f4b4698512d 100644 (file)
@@ -490,7 +490,7 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 
        /* while holding I_WB_SWITCH, no one else can update the association */
        spin_lock(&inode->i_lock);
-       if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+       if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
            inode->i_state & (I_WB_SWITCH | I_FREEING) ||
            inode_to_wb(inode) == isw->new_wb) {
                spin_unlock(&inode->i_lock);
index 2f504d615d9236663bfcc7c38eca354ce472897d..624f18bbfd2b3430e4a5d67c54cf84af4312a440 100644 (file)
@@ -130,7 +130,7 @@ static void fuse_evict_inode(struct inode *inode)
 {
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
-       if (inode->i_sb->s_flags & MS_ACTIVE) {
+       if (inode->i_sb->s_flags & SB_ACTIVE) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
@@ -141,7 +141,7 @@ static void fuse_evict_inode(struct inode *inode)
 static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (*flags & MS_MANDLOCK)
+       if (*flags & SB_MANDLOCK)
                return -EINVAL;
 
        return 0;
@@ -1056,10 +1056,10 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        int is_bdev = sb->s_bdev != NULL;
 
        err = -EINVAL;
-       if (sb->s_flags & MS_MANDLOCK)
+       if (sb->s_flags & SB_MANDLOCK)
                goto err;
 
-       sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
+       sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
 
        if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
@@ -1109,9 +1109,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
                goto err_dev_free;
 
        /* Handle umasking inside the fuse code */
-       if (sb->s_flags & MS_POSIXACL)
+       if (sb->s_flags & SB_POSIXACL)
                fc->dont_mask = 1;
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 
        fc->default_permissions = d.default_permissions;
        fc->allow_other = d.allow_other;
index a3711f543405218ddc6f88c45d60505955f11740..ad55eb86a2504bd048ddd6c674b78ec7c5e9df27 100644 (file)
@@ -1065,15 +1065,15 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
        sdp->sd_args = *args;
 
        if (sdp->sd_args.ar_spectator) {
-                sb->s_flags |= MS_RDONLY;
+                sb->s_flags |= SB_RDONLY;
                set_bit(SDF_RORECOVERY, &sdp->sd_flags);
        }
        if (sdp->sd_args.ar_posix_acl)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
 
-       sb->s_flags |= MS_NOSEC;
+       sb->s_flags |= SB_NOSEC;
        sb->s_magic = GFS2_MAGIC;
        sb->s_op = &gfs2_super_ops;
        sb->s_d_op = &gfs2_dops;
@@ -1257,7 +1257,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
        struct gfs2_args args;
        struct gfs2_sbd *sdp;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
@@ -1313,15 +1313,15 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
 
        if (s->s_root) {
                error = -EBUSY;
-               if ((flags ^ s->s_flags) & MS_RDONLY)
+               if ((flags ^ s->s_flags) & SB_RDONLY)
                        goto error_super;
        } else {
                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
                sb_set_blocksize(s, block_size(bdev));
-               error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
+               error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
                if (error)
                        goto error_super;
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
                bdev->bd_super = s;
        }
 
@@ -1365,7 +1365,7 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
                pr_warn("gfs2 mount does not exist\n");
                return ERR_CAST(s);
        }
-       if ((flags ^ s->s_flags) & MS_RDONLY) {
+       if ((flags ^ s->s_flags) & SB_RDONLY) {
                deactivate_locked_super(s);
                return ERR_PTR(-EBUSY);
        }
index 9cb5c9a97d69d04cf564ca69e48bfd15f701ea7c..d81d46e19726445801a60feb65a0513cb190b59f 100644 (file)
@@ -1256,10 +1256,10 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
                return -EINVAL;
 
        if (sdp->sd_args.ar_spectator)
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
 
-       if ((sb->s_flags ^ *flags) & MS_RDONLY) {
-               if (*flags & MS_RDONLY)
+       if ((sb->s_flags ^ *flags) & SB_RDONLY) {
+               if (*flags & SB_RDONLY)
                        error = gfs2_make_fs_ro(sdp);
                else
                        error = gfs2_make_fs_rw(sdp);
@@ -1269,9 +1269,9 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
 
        sdp->sd_args = args;
        if (sdp->sd_args.ar_posix_acl)
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        else
-               sb->s_flags &= ~MS_POSIXACL;
+               sb->s_flags &= ~SB_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
        else
index a85ca8b2c9ba4aa43439e2f350acbab9dda73b91..ca8b72d0a8315384161403e814983877b823f502 100644 (file)
@@ -117,7 +117,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
                kfree(tr);
        up_read(&sdp->sd_log_flush_lock);
 
-       if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
+       if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
                gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
        if (alloced)
                sb_end_intwrite(sdp->sd_vfs);
index 8aec5e732abf94efdb3def2b75a6134379b982e9..b63a4df7327b6d68690ef086eb7c30391d36e610 100644 (file)
@@ -98,13 +98,11 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
 void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
                struct hfs_bnode *src_node, int src, int len)
 {
-       struct hfs_btree *tree;
        struct page *src_page, *dst_page;
 
        hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
        if (!len)
                return;
-       tree = src_node->tree;
        src += src_node->page_offset;
        dst += dst_node->page_offset;
        src_page = src_node->page[0];
@@ -237,7 +235,6 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 
 static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 {
-       struct super_block *sb;
        struct hfs_bnode *node, *node2;
        struct address_space *mapping;
        struct page *page;
@@ -249,7 +246,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
                return NULL;
        }
 
-       sb = tree->inode->i_sb;
        size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
                sizeof(struct page *);
        node = kzalloc(size, GFP_KERNEL);
index 894994d2c88501cfb27fc1f46561fcc21e4e0d1e..460281b1299eb1eff15b5a4644a130e3e0f441ef 100644 (file)
@@ -204,11 +204,11 @@ int hfs_mdb_get(struct super_block *sb)
        attrib = mdb->drAtrb;
        if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
                pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended.  mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
                pr_warn("filesystem is marked locked, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        if (!sb_rdonly(sb)) {
                /* Mark the volume uncleanly unmounted in case we crash */
index 7e0d65e9586c7dae01d929bfe2e5b6ba373d20c5..173876782f73fd33838bec0e5508cbe79fa5c868 100644 (file)
@@ -114,18 +114,18 @@ static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int hfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       *flags |= SB_NODIRATIME;
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (!(*flags & MS_RDONLY)) {
+       if (!(*flags & SB_RDONLY)) {
                if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
                        pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended.  leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
                        pr_warn("filesystem is marked locked, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                }
        }
        return 0;
@@ -407,7 +407,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_op = &hfs_super_operations;
        sb->s_xattr = hfs_xattr_handlers;
-       sb->s_flags |= MS_NODIRATIME;
+       sb->s_flags |= SB_NODIRATIME;
        mutex_init(&sbi->bitmap_lock);
 
        res = hfs_mdb_get(sb);
index d77015c3f22c2208e9ba0bce7733fe5f9f639d40..177fae4e6581439bb7748ffdba99f91095da3bcc 100644 (file)
@@ -127,14 +127,12 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
 void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
                    struct hfs_bnode *src_node, int src, int len)
 {
-       struct hfs_btree *tree;
        struct page **src_page, **dst_page;
        int l;
 
        hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
        if (!len)
                return;
-       tree = src_node->tree;
        src += src_node->page_offset;
        dst += dst_node->page_offset;
        src_page = src_node->page + (src >> PAGE_SHIFT);
@@ -401,7 +399,6 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 
 static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 {
-       struct super_block *sb;
        struct hfs_bnode *node, *node2;
        struct address_space *mapping;
        struct page *page;
@@ -414,7 +411,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
                return NULL;
        }
 
-       sb = tree->inode->i_sb;
        size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
                sizeof(struct page *);
        node = kzalloc(size, GFP_KERNEL);
index e5bb2de2262ae68c64a061673f93220ae8872383..1d458b7169572c60628ccf6391846d8b2c1adea6 100644 (file)
@@ -329,9 +329,9 @@ static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (!(*flags & MS_RDONLY)) {
+       if (!(*flags & SB_RDONLY)) {
                struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
                int force = 0;
 
@@ -340,20 +340,20 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
 
                if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                        pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (force) {
                        /* nothing */
                } else if (vhdr->attributes &
                                cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                        pr_warn("filesystem is marked locked, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                } else if (vhdr->attributes &
                                cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
                        pr_warn("filesystem is marked journaled, leaving read-only.\n");
-                       sb->s_flags |= MS_RDONLY;
-                       *flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
+                       *flags |= SB_RDONLY;
                }
        }
        return 0;
@@ -455,16 +455,16 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 
        if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
                pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
                /* nothing */
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
                pr_warn("Filesystem is marked locked, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
                        !sb_rdonly(sb)) {
                pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        err = -EINVAL;
index e0e60b1484006f9ca3d396399221983d4171eab9..7c49f1ef0c850320b351397e0c1f2d93f267131a 100644 (file)
@@ -288,7 +288,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
                                        goto bail;
                                }
                                if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
-                                       if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
+                                       if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & SB_RDONLY) goto ok;
                                        hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
                                        goto bail;
                                }
index 1516fb4e28f409d045e00685b4a2c55732c088f0..c45a3b9b9ac7e22861b5dcd7816a884a15bb62bb 100644 (file)
@@ -78,7 +78,7 @@ void hpfs_error(struct super_block *s, const char *fmt, ...)
                        else {
                                pr_cont("; remounting read-only\n");
                                mark_dirty(s, 0);
-                               s->s_flags |= MS_RDONLY;
+                               s->s_flags |= SB_RDONLY;
                        }
                } else if (sb_rdonly(s))
                                pr_cont("; going on - but anything won't be destroyed because it's read-only\n");
@@ -457,7 +457,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
 
        sync_filesystem(s);
 
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
 
        hpfs_lock(s);
        uid = sbi->sb_uid; gid = sbi->sb_gid;
@@ -488,7 +488,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
        sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
        sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
 
-       if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
+       if (!(*flags & SB_RDONLY)) mark_dirty(s, 1);
 
        hpfs_unlock(s);
        return 0;
@@ -614,7 +614,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                goto bail4;
        }
 
-       s->s_flags |= MS_NOATIME;
+       s->s_flags |= SB_NOATIME;
 
        /* Fill superblock stuff */
        s->s_magic = HPFS_SUPER_MAGIC;
index 1e76730aac0deb99df8e39165d959063da93225d..8a85f3f53446521991550583a0f106dd7af042c7 100644 (file)
@@ -639,11 +639,11 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
                /*
-                * page_put due to reference from alloc_huge_page()
                 * unlock_page because locked by add_to_page_cache()
+                * page_put due to reference from alloc_huge_page()
                 */
-               put_page(page);
                unlock_page(page);
+               put_page(page);
        }
 
        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
index fd401028a309e2d260ace4aaba996b8838b13fb1..03102d6ef044d484ee9d7d1c9731437a4418d2c9 100644 (file)
@@ -416,7 +416,7 @@ void inode_add_lru(struct inode *inode)
 {
        if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
                                I_FREEING | I_WILL_FREE)) &&
-           !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
+           !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
                inode_lru_list_add(inode);
 }
 
@@ -595,7 +595,7 @@ static void dispose_list(struct list_head *head)
  * @sb:                superblock to operate on
  *
  * Make sure that no inodes with zero refcount are retained.  This is
- * called by superblock shutdown after having MS_ACTIVE flag removed,
+ * called by superblock shutdown after having SB_ACTIVE flag removed,
  * so any inode reaching zero refcount during or after that call will
  * be immediately evicted.
  */
@@ -1492,7 +1492,7 @@ static void iput_final(struct inode *inode)
        else
                drop = generic_drop_inode(inode);
 
-       if (!drop && (sb->s_flags & MS_ACTIVE)) {
+       if (!drop && (sb->s_flags & SB_ACTIVE)) {
                inode_add_lru(inode);
                spin_unlock(&inode->i_lock);
                return;
@@ -1644,7 +1644,7 @@ int generic_update_time(struct inode *inode, struct timespec *time, int flags)
        if (flags & S_MTIME)
                inode->i_mtime = *time;
 
-       if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
+       if (!(inode->i_sb->s_flags & SB_LAZYTIME) || (flags & S_VERSION))
                iflags |= I_DIRTY_SYNC;
        __mark_inode_dirty(inode, iflags);
        return 0;
@@ -1691,7 +1691,7 @@ bool __atime_needs_update(const struct path *path, struct inode *inode,
 
        if (IS_NOATIME(inode))
                return false;
-       if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+       if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;
 
        if (mnt->mnt_flags & MNT_NOATIME)
index 447a24d77b894ef733412ba201cadcaa9a226f7e..bc258a4402f6afbc921a508db37f95002f2ff7f0 100644 (file)
@@ -114,7 +114,7 @@ static void destroy_inodecache(void)
 static int isofs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                return -EROFS;
        return 0;
 }
index e96c6b05e43e786ae2dc61bd2cea74fc88631887..d8c274d39ddb986c38cc9482c11de8711b9a8964 100644 (file)
@@ -409,10 +409,10 @@ int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
                mutex_unlock(&c->alloc_sem);
        }
 
-       if (!(*flags & MS_RDONLY))
+       if (!(*flags & SB_RDONLY))
                jffs2_start_garbage_collect_thread(c);
 
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
index 824e61ede465fd6ec4c433785dcdfb2da96dc848..c2fbec19c6167c8ea1d0393e9f3b57d6d851b361 100644 (file)
@@ -59,7 +59,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
 }
 
 
-#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & SB_RDONLY)
 
 #define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) )
 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
index 153f1c6eb16932a9b87fc9a3ecd2e06b5955c245..f60dee7faf0373f12bdfcf7f3d5f3883ec86b16e 100644 (file)
@@ -301,10 +301,10 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_op = &jffs2_super_operations;
        sb->s_export_op = &jffs2_export_ops;
-       sb->s_flags = sb->s_flags | MS_NOATIME;
+       sb->s_flags = sb->s_flags | SB_NOATIME;
        sb->s_xattr = jffs2_xattr_handlers;
 #ifdef CONFIG_JFFS2_FS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        ret = jffs2_do_fill_super(sb, data, silent);
        return ret;
index 2f7b3af5b8b7aa9fef38db0c464f5a4d013aff7b..90373aebfdca16057bb14bff3097b1581b9d205d 100644 (file)
@@ -87,7 +87,7 @@ static void jfs_handle_error(struct super_block *sb)
        else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
                jfs_err("ERROR: (device %s): remounting filesystem as read-only",
                        sb->s_id);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /* nothing is done for continue beyond marking the superblock dirty */
@@ -477,7 +477,7 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
                        return rc;
        }
 
-       if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+       if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
                /*
                 * Invalidate any previously read metadata.  fsck may have
                 * changed the on-disk data since we mounted r/o
@@ -488,12 +488,12 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
                ret = jfs_mount_rw(sb, 1);
 
                /* mark the fs r/w for quota activity */
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                dquot_resume(sb, -1);
                return ret;
        }
-       if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+       if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
                rc = dquot_suspend(sb, -1);
                if (rc < 0)
                        return rc;
@@ -545,7 +545,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
        sbi->flag = flag;
 
 #ifdef CONFIG_JFS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
 
        if (newLVSize) {
index 95a7c88baed9d32284c073c46e248d670abca1b5..26dd9a50f38382a069c2680260d0a619d0db60e3 100644 (file)
@@ -335,7 +335,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
                        deactivate_locked_super(sb);
                        return ERR_PTR(error);
                }
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
 
                mutex_lock(&kernfs_mutex);
                list_add(&info->node, &root->supers);
index 3aabe553fc4500a864459bb08f520dd820c94420..7ff3cb904acdf8042c7c169dc9816764f0319629 100644 (file)
@@ -246,7 +246,7 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
        struct inode *root;
        struct qstr d_name = QSTR_INIT(name, strlen(name));
 
-       s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+       s = sget_userns(fs_type, NULL, set_anon_super, SB_KERNMOUNT|SB_NOUSER,
                        &init_user_ns, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
@@ -277,7 +277,7 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
        d_instantiate(dentry, root);
        s->s_root = dentry;
        s->s_d_op = dops;
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
        return dget(s->s_root);
 
 Enomem:
@@ -578,7 +578,7 @@ int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *c
        spin_lock(&pin_fs_lock);
        if (unlikely(!*mount)) {
                spin_unlock(&pin_fs_lock);
-               mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
+               mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
                if (IS_ERR(mnt))
                        return PTR_ERR(mnt);
                spin_lock(&pin_fs_lock);
index 0d4e590e05498b69a7e93a35b7dcde8d93e56a7d..826a89184f90fc7d104980b15922dfa565146cf1 100644 (file)
@@ -578,8 +578,10 @@ static void nlm_complain_hosts(struct net *net)
 
                if (ln->nrhosts == 0)
                        return;
-               printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net);
-               dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net);
+               pr_warn("lockd: couldn't shutdown host module for net %x!\n",
+                       net->ns.inum);
+               dprintk("lockd: %lu hosts left in net %x:\n", ln->nrhosts,
+                       net->ns.inum);
        } else {
                if (nrhosts == 0)
                        return;
@@ -590,9 +592,9 @@ static void nlm_complain_hosts(struct net *net)
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
-               dprintk("       %s (cnt %d use %d exp %ld net %p)\n",
+               dprintk("       %s (cnt %d use %d exp %ld net %x)\n",
                        host->h_name, atomic_read(&host->h_count),
-                       host->h_inuse, host->h_expires, host->net);
+                       host->h_inuse, host->h_expires, host->net->ns.inum);
        }
 }
 
@@ -605,7 +607,8 @@ nlm_shutdown_hosts_net(struct net *net)
        mutex_lock(&nlm_host_mutex);
 
        /* First, make all hosts eligible for gc */
-       dprintk("lockd: nuking all hosts in net %p...\n", net);
+       dprintk("lockd: nuking all hosts in net %x...\n",
+               net ? net->ns.inum : 0);
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
@@ -618,9 +621,8 @@ nlm_shutdown_hosts_net(struct net *net)
 
        /* Then, perform a garbage collection pass */
        nlm_gc_hosts(net);
-       mutex_unlock(&nlm_host_mutex);
-
        nlm_complain_hosts(net);
+       mutex_unlock(&nlm_host_mutex);
 }
 
 /*
@@ -646,7 +648,8 @@ nlm_gc_hosts(struct net *net)
        struct hlist_node *next;
        struct nlm_host *host;
 
-       dprintk("lockd: host garbage collection for net %p\n", net);
+       dprintk("lockd: host garbage collection for net %x\n",
+               net ? net->ns.inum : 0);
        for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
@@ -662,9 +665,10 @@ nlm_gc_hosts(struct net *net)
                if (atomic_read(&host->h_count) || host->h_inuse
                 || time_before(jiffies, host->h_expires)) {
                        dprintk("nlm_gc_hosts skipping %s "
-                               "(cnt %d use %d exp %ld net %p)\n",
+                               "(cnt %d use %d exp %ld net %x)\n",
                                host->h_name, atomic_read(&host->h_count),
-                               host->h_inuse, host->h_expires, host->net);
+                               host->h_inuse, host->h_expires,
+                               host->net->ns.inum);
                        continue;
                }
                nlm_destroy_host_locked(host);
index 9fbbd11f9ecbbcc9e29aa989c33a58fc8ad727ff..96cfb2967ac7571d0ab6c7640198fbd7415deb22 100644 (file)
@@ -110,7 +110,8 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
        clnt = nsm_create(host->net, host->nodename);
        if (IS_ERR(clnt)) {
                dprintk("lockd: failed to create NSM upcall transport, "
-                       "status=%ld, net=%p\n", PTR_ERR(clnt), host->net);
+                       "status=%ld, net=%x\n", PTR_ERR(clnt),
+                       host->net->ns.inum);
                return PTR_ERR(clnt);
        }
 
index b837fb7e290a6a63346f1ad7f00e63f2db8a5382..9c36d614bf89602121427c27d443365689d31aac 100644 (file)
@@ -57,6 +57,9 @@ static struct task_struct     *nlmsvc_task;
 static struct svc_rqst         *nlmsvc_rqst;
 unsigned long                  nlmsvc_timeout;
 
+atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
+
 unsigned int lockd_net_id;
 
 /*
@@ -259,7 +262,7 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
        if (error < 0)
                goto err_bind;
        set_grace_period(net);
-       dprintk("lockd_up_net: per-net data created; net=%p\n", net);
+       dprintk("%s: per-net data created; net=%x\n", __func__, net->ns.inum);
        return 0;
 
 err_bind:
@@ -274,12 +277,15 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
        if (ln->nlmsvc_users) {
                if (--ln->nlmsvc_users == 0) {
                        nlm_shutdown_hosts_net(net);
+                       cancel_delayed_work_sync(&ln->grace_period_end);
+                       locks_end_grace(&ln->lockd_manager);
                        svc_shutdown_net(serv, net);
-                       dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
+                       dprintk("%s: per-net data destroyed; net=%x\n",
+                               __func__, net->ns.inum);
                }
        } else {
-               printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
-                               nlmsvc_task, net);
+               pr_err("%s: no users! task=%p, net=%x\n",
+                       __func__, nlmsvc_task, net->ns.inum);
                BUG();
        }
 }
@@ -290,7 +296,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
        struct sockaddr_in sin;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nlm_ntf_refcnt))
                goto out;
 
        if (nlmsvc_rqst) {
@@ -301,6 +308,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
                svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
                        (struct sockaddr *)&sin);
        }
+       atomic_dec(&nlm_ntf_refcnt);
+       wake_up(&nlm_ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -317,7 +326,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
        struct sockaddr_in6 sin6;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nlm_ntf_refcnt))
                goto out;
 
        if (nlmsvc_rqst) {
@@ -329,6 +339,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
                svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
                        (struct sockaddr *)&sin6);
        }
+       atomic_dec(&nlm_ntf_refcnt);
+       wake_up(&nlm_ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -345,10 +357,12 @@ static void lockd_unregister_notifiers(void)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
 #endif
+       wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
 }
 
 static void lockd_svc_exit_thread(void)
 {
+       atomic_dec(&nlm_ntf_refcnt);
        lockd_unregister_notifiers();
        svc_exit_thread(nlmsvc_rqst);
 }
@@ -369,9 +383,11 @@ static int lockd_start_svc(struct svc_serv *serv)
                printk(KERN_WARNING
                        "lockd_up: svc_rqst allocation failed, error=%d\n",
                        error);
+               lockd_unregister_notifiers();
                goto out_rqst;
        }
 
+       atomic_inc(&nlm_ntf_refcnt);
        svc_sock_update_bufs(serv);
        serv->sv_maxconn = nlm_max_connections;
 
@@ -459,13 +475,16 @@ int lockd_up(struct net *net)
        }
 
        error = lockd_up_net(serv, net);
-       if (error < 0)
-               goto err_net;
+       if (error < 0) {
+               lockd_unregister_notifiers();
+               goto err_put;
+       }
 
        error = lockd_start_svc(serv);
-       if (error < 0)
-               goto err_start;
-
+       if (error < 0) {
+               lockd_down_net(serv, net);
+               goto err_put;
+       }
        nlmsvc_users++;
        /*
         * Note: svc_serv structures have an initial use count of 1,
@@ -476,12 +495,6 @@ err_put:
 err_create:
        mutex_unlock(&nlmsvc_mutex);
        return error;
-
-err_start:
-       lockd_down_net(serv, net);
-err_net:
-       lockd_unregister_notifiers();
-       goto err_put;
 }
 EXPORT_SYMBOL_GPL(lockd_up);
 
@@ -678,6 +691,17 @@ static int lockd_init_net(struct net *net)
 
 static void lockd_exit_net(struct net *net)
 {
+       struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+       WARN_ONCE(!list_empty(&ln->lockd_manager.list),
+                 "net %x %s: lockd_manager.list is not empty\n",
+                 net->ns.inum, __func__);
+       WARN_ONCE(!list_empty(&ln->nsm_handles),
+                 "net %x %s: nsm_handles list is not empty\n",
+                 net->ns.inum, __func__);
+       WARN_ONCE(delayed_work_pending(&ln->grace_period_end),
+                 "net %x %s: grace_period_end was not cancelled\n",
+                 net->ns.inum, __func__);
 }
 
 static struct pernet_operations lockd_net_ops = {
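
The lockd changes above pair an atomic reference count (nlm_ntf_refcnt) with a wait queue so that tearing down the service can wait for address-notifier callbacks still in flight: callbacks take a reference with atomic_inc_not_zero() and wake the waiter when they drop it. A rough userspace analogue of that handshake is sketched below, using C11 atomics and a busy-wait in place of wait_event()/wake_up(); all names are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

/* Starts at 1: the running service holds one reference, as lockd_start_svc() does. */
static atomic_int ntf_refcnt = 1;

/* Notifier callback: only proceed if the service still holds its reference
 * (the CAS loop mimics the kernel's atomic_inc_not_zero()). */
static void notifier_event(void)
{
	int old = atomic_load(&ntf_refcnt);

	do {
		if (old == 0)
			return;		/* service already torn down: skip the work */
	} while (!atomic_compare_exchange_weak(&ntf_refcnt, &old, old + 1));

	/* ... safely touch the svc_rqst here ... */

	atomic_fetch_sub(&ntf_refcnt, 1);	/* the kernel side also does wake_up(&nlm_ntf_wq) */
}

static void service_shutdown(void)
{
	atomic_fetch_sub(&ntf_refcnt, 1);	/* drop the service's own reference */
	while (atomic_load(&ntf_refcnt) != 0)
		;				/* stand-in for wait_event(): drain in-flight callbacks */
}

int main(void)
{
	notifier_event();
	service_shutdown();
	printf("notifiers drained\n");
	return 0;
}
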
index a563ddbc19e6935e91fe42a6946ee937efab482c..4ec3d6e03e76dc0909c25fd90b4c9e2ee0dd33a8 100644 (file)
@@ -370,7 +370,7 @@ nlmsvc_mark_resources(struct net *net)
 {
        struct nlm_host hint;
 
-       dprintk("lockd: nlmsvc_mark_resources for net %p\n", net);
+       dprintk("lockd: %s for net %x\n", __func__, net ? net->ns.inum : 0);
        hint.net = net;
        nlm_traverse_files(&hint, nlmsvc_mark_host, NULL);
 }
index 1bd71c4d663a8c0356c457905293b5e9e35f2c9a..21b4dfa289eea6e6575c78e966efa691eaeff494 100644 (file)
 
 static inline bool is_remote_lock(struct file *filp)
 {
-       return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
+       return likely(!(filp->f_path.dentry->d_sb->s_flags & SB_NOREMOTELOCK));
 }
 
 static bool lease_breaking(struct file_lock *fl)
index d818fd23678700bf8435af2fa64ed62dfdba4d2c..b8b8b9ced9f81c47a1f76da3ef61c2f9d4645c78 100644 (file)
@@ -269,6 +269,9 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);
 
+       /* Unlikely, but not impossible */
+       if (unlikely(cache->c_entry_count < 0))
+               return 0;
        return cache->c_entry_count;
 }
 
index b6829d67964324783f182be42be939753f07a7d0..72e308c3e66b91fa9a915ebcb6c31f7f62c432bc 100644 (file)
@@ -125,9 +125,9 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
 
        sync_filesystem(sb);
        ms = sbi->s_ms;
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                if (ms->s_state & MINIX_VALID_FS ||
                    !(sbi->s_mount_state & MINIX_VALID_FS))
                        return 0;
index f0c7a7b9b6ca7562217746369cd8d5d82d43a99f..9cc91fb7f156541bd53243b35c2823bbf9ca1133 100644 (file)
@@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd,
         * of the daemon to instantiate them before they can be used.
         */
        if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                          LOOKUP_OPEN | LOOKUP_CREATE |
-                          LOOKUP_AUTOMOUNT))) {
-               /* Positive dentry that isn't meant to trigger an
-                * automount, EISDIR will allow it to be used,
-                * otherwise there's no mount here "now" so return
-                * ENOENT.
-                */
-               if (path->dentry->d_inode)
-                       return -EISDIR;
-               else
-                       return -ENOENT;
-       }
+                          LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+           path->dentry->d_inode)
+               return -EISDIR;
 
        if (path->dentry->d_sb->s_user_ns != &init_user_ns)
                return -EACCES;
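
The follow_automount() hunk above collapses the old two-way return into a single check: when none of the lookup intents that may trigger an automount are set and the dentry is positive, return -EISDIR; the old -ENOENT branch for negative dentries is dropped so such lookups fall through to the automount instead. A small standalone sketch of the resulting condition follows; the LOOKUP_* bit values here are placeholders, not the kernel's.

#include <stdio.h>

/* Illustrative lookup-intent bits; the real LOOKUP_* flags are in include/linux/namei.h. */
#define LOOKUP_PARENT		0x01
#define LOOKUP_DIRECTORY	0x02
#define LOOKUP_OPEN		0x04
#define LOOKUP_CREATE		0x08
#define LOOKUP_AUTOMOUNT	0x10

#define EISDIR 21

/* Mirrors the new condition: a positive dentry with no automount-triggering
 * intent short-circuits with -EISDIR; everything else falls through. */
static int automount_shortcut(unsigned int lookup_flags, int dentry_is_positive)
{
	if (!(lookup_flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
			      LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
	    dentry_is_positive)
		return -EISDIR;
	return 0;	/* proceed to the real automount handling */
}

int main(void)
{
	printf("%d %d\n",
	       automount_shortcut(0, 1),		/* -EISDIR: nothing asked for the mount */
	       automount_shortcut(LOOKUP_OPEN, 1));	/* 0: open intent triggers the automount */
	return 0;
}
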
index 129f1937fa2c11527633c98a8840c2aae011fa0e..41de88cdc053fa4c6f8921cd2ee29f1a911642a4 100644 (file)
@@ -103,7 +103,7 @@ static void destroy_inodecache(void)
 static int ncp_remount(struct super_block *sb, int *flags, char* data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NODIRATIME;
+       *flags |= SB_NODIRATIME;
        return 0;
 }
 
@@ -547,7 +547,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
        else
                default_bufsize = 1024;
 
-       sb->s_flags |= MS_NODIRATIME;   /* probably even noatime */
+       sb->s_flags |= SB_NODIRATIME;   /* probably even noatime */
        sb->s_maxbytes = 0xFFFFFFFFU;
        sb->s_blocksize = 1024; /* Eh...  Is this correct? */
        sb->s_blocksize_bits = 10;
index e51ae52ed14ff5ef6bc28dd4d9e60d1f33a9fb6c..2f3f86726f5b96cdbdf426a0ee7264b6fd3aa5ae 100644 (file)
@@ -1256,7 +1256,7 @@ static int nfs_dentry_delete(const struct dentry *dentry)
                /* Unhash it, so that ->d_iput() would be called */
                return 1;
        }
-       if (!(dentry->d_sb->s_flags & MS_ACTIVE)) {
+       if (!(dentry->d_sb->s_flags & SB_ACTIVE)) {
                /* Unhash it, so that ancestors of killed async unlink
                 * files will be cleaned up during umount */
                return 1;
index 38b93d54c02e2d64111e5ff522795badf27fdcf7..b992d2382ffa373d038f84761ab19f9bfaa6a9e9 100644 (file)
@@ -752,7 +752,7 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
         * Note that we only have to check the vfsmount flags here:
         *  - NFS always sets S_NOATIME by so checking it would give a
         *    bogus result
-        *  - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
+        *  - NFS never sets SB_NOATIME or SB_NODIRATIME so there is
         *    no point in checking those.
         */
        if ((path->mnt->mnt_flags & MNT_NOATIME) ||
index 5ab17fd4700a69bff2894a9d382488c5da717411..8357ff69962f22caae59d1ac357058d89800e963 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/nfs_page.h>
 #include <linux/wait_bit.h>
 
-#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
+#define NFS_MS_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
 
 extern const struct export_operations nfs_export_ops;
 
index 43cadb28db6ef5a2f4e4a1d98eb6d98b1d9207b3..29bacdc56f6a9fcf83225844360088e180d32ad7 100644 (file)
@@ -813,9 +813,9 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
         */
        seq_printf(m, "\n\topts:\t");
        seq_puts(m, sb_rdonly(root->d_sb) ? "ro" : "rw");
-       seq_puts(m, root->d_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
-       seq_puts(m, root->d_sb->s_flags & MS_NOATIME ? ",noatime" : "");
-       seq_puts(m, root->d_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_SYNCHRONOUS ? ",sync" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_NOATIME ? ",noatime" : "");
+       seq_puts(m, root->d_sb->s_flags & SB_NODIRATIME ? ",nodiratime" : "");
        nfs_show_mount_options(m, nfss, 1);
 
        seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ);
@@ -2296,11 +2296,11 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
        /*
         * noac is a special case. It implies -o sync, but that's not
         * necessarily reflected in the mtab options. do_remount_sb
-        * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the
+        * will clear SB_SYNCHRONOUS if -o sync wasn't specified in the
         * remount options, so we have to explicitly reset it.
         */
        if (data->flags & NFS_MOUNT_NOAC)
-               *flags |= MS_SYNCHRONOUS;
+               *flags |= SB_SYNCHRONOUS;
 
        /* compare new mount options with old ones */
        error = nfs_compare_remount_data(nfss, data);
@@ -2349,7 +2349,7 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
                /* The VFS shouldn't apply the umask to mode bits. We will do
                 * so ourselves when necessary.
                 */
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
                sb->s_time_gran = 1;
                sb->s_export_op = &nfs_export_ops;
        }
@@ -2379,7 +2379,7 @@ static void nfs_clone_super(struct super_block *sb,
                /* The VFS shouldn't apply the umask to mode bits. We will do
                 * so ourselves when necessary.
                 */
-               sb->s_flags |= MS_POSIXACL;
+               sb->s_flags |= SB_POSIXACL;
        }
 
        nfs_initialise_sb(sb);
@@ -2600,11 +2600,11 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
 
        /* -o noac implies -o sync */
        if (server->flags & NFS_MOUNT_NOAC)
-               sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+               sb_mntdata.mntflags |= SB_SYNCHRONOUS;
 
        if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
-               if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
-                       sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+               if (mount_info->cloned->sb->s_flags & SB_SYNCHRONOUS)
+                       sb_mntdata.mntflags |= SB_SYNCHRONOUS;
 
        /* Get a superblock - note that we may end up sharing one that already exists */
        s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
@@ -2641,7 +2641,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
        if (error)
                goto error_splat_root;
 
-       s->s_flags |= MS_ACTIVE;
+       s->s_flags |= SB_ACTIVE;
 
 out:
        return mntroot;
index 420d3a0ab258fb2b312310e50081bbab30f2c2ae..5be08f02a76bcb7f405bce8169f53ebf34c452a2 100644 (file)
@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
        struct list_head *grace_list = net_generic(net, grace_net_id);
 
        spin_lock(&grace_lock);
-       list_add(&lm->list, grace_list);
+       if (list_empty(&lm->list))
+               list_add(&lm->list, grace_list);
+       else
+               WARN(1, "double list_add attempt detected in net %x %s\n",
+                    net->ns.inum, (net == &init_net) ? "(init_net)" : "");
        spin_unlock(&grace_lock);
 }
 EXPORT_SYMBOL_GPL(locks_start_grace);
@@ -55,14 +59,7 @@ locks_end_grace(struct lock_manager *lm)
 }
 EXPORT_SYMBOL_GPL(locks_end_grace);
 
-/**
- * locks_in_grace
- *
- * Lock managers call this function to determine when it is OK for them
- * to answer ordinary lock requests, and when they should accept only
- * lock reclaims.
- */
-int
+static bool
 __state_in_grace(struct net *net, bool open)
 {
        struct list_head *grace_list = net_generic(net, grace_net_id);
@@ -78,15 +75,22 @@ __state_in_grace(struct net *net, bool open)
        return false;
 }
 
-int locks_in_grace(struct net *net)
+/**
+ * locks_in_grace
+ *
+ * Lock managers call this function to determine when it is OK for them
+ * to answer ordinary lock requests, and when they should accept only
+ * lock reclaims.
+ */
+bool locks_in_grace(struct net *net)
 {
-       return __state_in_grace(net, 0);
+       return __state_in_grace(net, false);
 }
 EXPORT_SYMBOL_GPL(locks_in_grace);
 
-int opens_in_grace(struct net *net)
+bool opens_in_grace(struct net *net)
 {
-       return __state_in_grace(net, 1);
+       return __state_in_grace(net, true);
 }
 EXPORT_SYMBOL_GPL(opens_in_grace);
 
@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
 {
        struct list_head *grace_list = net_generic(net, grace_net_id);
 
-       BUG_ON(!list_empty(grace_list));
+       WARN_ONCE(!list_empty(grace_list),
+                 "net %x %s: grace_list is not empty\n",
+                 net->ns.inum, __func__);
 }
 
 static struct pernet_operations grace_net_ops = {
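
locks_in_grace() and opens_in_grace() now return bool, and a non-empty per-net grace list at teardown is reported with WARN_ONCE instead of killing the box with BUG_ON. A sketch of how a lock manager typically consults the grace period; the reclaim flag and the nlm4 error code are illustrative, not taken from this patch:

    /* refuse ordinary lock requests while the per-net grace period runs,
     * and refuse reclaims once it has ended
     */
    if (locks_in_grace(SVC_NET(rqstp)) && !reclaim)
            return nlm4_lck_denied_grace_period;
    if (!locks_in_grace(SVC_NET(rqstp)) && reclaim)
            return nlm4_lck_denied_grace_period;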
index 46b48dbbdd3255260c2b505b49d87b8d2e874100..8ceb25a10ea0df002cae637d137d0f0e0d55a93b 100644 (file)
@@ -232,7 +232,7 @@ static struct cache_head *expkey_alloc(void)
                return NULL;
 }
 
-static struct cache_detail svc_expkey_cache_template = {
+static const struct cache_detail svc_expkey_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPKEY_HASHMAX,
        .name           = "nfsd.fh",
@@ -748,7 +748,7 @@ static struct cache_head *svc_export_alloc(void)
                return NULL;
 }
 
-static struct cache_detail svc_export_cache_template = {
+static const struct cache_detail svc_export_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = EXPORT_HASHMAX,
        .name           = "nfsd.export",
@@ -1230,7 +1230,7 @@ nfsd_export_init(struct net *net)
        int rv;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("nfsd: initializing export module (net: %p).\n", net);
+       dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum);
 
        nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
        if (IS_ERR(nn->svc_export_cache))
@@ -1278,7 +1278,7 @@ nfsd_export_shutdown(struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("nfsd: shutting down export module (net: %p).\n", net);
+       dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum);
 
        cache_unregister_net(nn->svc_expkey_cache, net);
        cache_unregister_net(nn->svc_export_cache, net);
@@ -1286,5 +1286,5 @@ nfsd_export_shutdown(struct net *net)
        cache_destroy_net(nn->svc_export_cache, net);
        svcauth_unix_purge(net);
 
-       dprintk("nfsd: export shutdown complete (net: %p).\n", net);
+       dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum);
 }
index 6dfede6d172aa276ba99544cf561cf4744220ff7..84831253203dda4a4926db4a532098ffee8a1f4c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/nsproxy.h>
 #include <linux/sunrpc/addr.h>
 #include <linux/uaccess.h>
+#include <linux/kernel.h>
 
 #include "state.h"
 #include "netns.h"
@@ -126,8 +127,6 @@ static struct nfsd_fault_inject_op inject_ops[] = {
        },
 };
 
-#define NUM_INJECT_OPS (sizeof(inject_ops)/sizeof(struct nfsd_fault_inject_op))
-
 int nfsd_fault_inject_init(void)
 {
        unsigned int i;
@@ -138,7 +137,7 @@ int nfsd_fault_inject_init(void)
        if (!debug_dir)
                goto fail;
 
-       for (i = 0; i < NUM_INJECT_OPS; i++) {
+       for (i = 0; i < ARRAY_SIZE(inject_ops); i++) {
                op = &inject_ops[i];
                if (!debugfs_create_file(op->file, mode, debug_dir, op, &fops_nfsd))
                        goto fail;
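
Replacing the hand-rolled NUM_INJECT_OPS macro with ARRAY_SIZE() from linux/kernel.h is the idiomatic way to iterate a static table, and it fails to build if accidentally applied to a pointer. A trivial, self-contained sketch with made-up names:

    #include <linux/kernel.h>

    static const char * const inject_names[] = { "forget_clients", "forget_locks" };

    static void example_iterate(void)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(inject_names); i++)  /* element count from the compiler */
                    pr_debug("op %u: %s\n", i, inject_names[i]);
    }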
index 3714231a9d0fb71e4e440a9f8efa7113839c4392..36358d435cb044a3a740756ff8e247e77020bfac 100644 (file)
@@ -107,7 +107,7 @@ struct nfsd_net {
        bool lockd_up;
 
        /* Time of server startup */
-       struct timeval nfssvc_boot;
+       struct timespec64 nfssvc_boot;
 
        /*
         * Max number of connections this nfsd container will allow. Defaults
@@ -119,6 +119,9 @@ struct nfsd_net {
        u32 clverifier_counter;
 
        struct svc_serv *nfsd_serv;
+
+       wait_queue_head_t ntf_wq;
+       atomic_t ntf_refcnt;
 };
 
 /* Simple check to find out if a given net was properly initialized */
index f38acd9054419606e3abd25060599960d38c6f2c..2758480555faa504b1aafc204ea549361bf3b932 100644 (file)
@@ -748,8 +748,9 @@ nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p)
        if (resp->status == 0) {
                *p++ = htonl(resp->count);
                *p++ = htonl(resp->committed);
-               *p++ = htonl(nn->nfssvc_boot.tv_sec);
-               *p++ = htonl(nn->nfssvc_boot.tv_usec);
+               /* unique identifier, y2038 overflow can be ignored */
+               *p++ = htonl((u32)nn->nfssvc_boot.tv_sec);
+               *p++ = htonl(nn->nfssvc_boot.tv_nsec);
        }
        return xdr_ressize_check(rqstp, p);
 }
@@ -1119,8 +1120,9 @@ nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p)
        p = encode_wcc_data(rqstp, p, &resp->fh);
        /* Write verifier */
        if (resp->status == 0) {
-               *p++ = htonl(nn->nfssvc_boot.tv_sec);
-               *p++ = htonl(nn->nfssvc_boot.tv_usec);
+               /* unique identifier, y2038 overflow can be ignored */
+               *p++ = htonl((u32)nn->nfssvc_boot.tv_sec);
+               *p++ = htonl(nn->nfssvc_boot.tv_nsec);
        }
        return xdr_ressize_check(rqstp, p);
 }
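
The write and commit verifiers now come from a struct timespec64, so the sub-second field is nanoseconds rather than microseconds and tv_sec is 64-bit. Because the verifier is only an opaque cookie, truncating the seconds to 32 bits is deliberate and harmless. A condensed sketch of the pattern (example_boot_verifier is a hypothetical name):

    #include <linux/timekeeping.h>
    #include <linux/types.h>

    static void example_boot_verifier(u32 verf[2])
    {
            struct timespec64 boot;

            ktime_get_real_ts64(&boot);     /* replaces do_gettimeofday() */
            verf[0] = (u32)boot.tv_sec;     /* y2038 truncation is fine: opaque cookie */
            verf[1] = (u32)boot.tv_nsec;
    }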
index 6b9b6cca469f427fed55ec5d892141e38be23eb4..a5bb76593ce72c280ddbd8e5957beddea49ec413 100644 (file)
@@ -178,7 +178,7 @@ static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
 static struct ent *idtoname_update(struct cache_detail *, struct ent *,
                                   struct ent *);
 
-static struct cache_detail idtoname_cache_template = {
+static const struct cache_detail idtoname_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
        .name           = "nfs4.idtoname",
@@ -341,7 +341,7 @@ static struct ent *nametoid_update(struct cache_detail *, struct ent *,
                                   struct ent *);
 static int         nametoid_parse(struct cache_detail *, char *, int);
 
-static struct cache_detail nametoid_cache_template = {
+static const struct cache_detail nametoid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = ENT_HASHMAX,
        .name           = "nfs4.nametoid",
index ea45d954e8d7c53cbb3db6dcbf8ac3958b314a90..7d888369f85a4194b0ddf0c2202bb693fe9cac99 100644 (file)
@@ -336,7 +336,7 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
 
        trace_layout_recall(&ls->ls_stid.sc_stateid);
 
-       atomic_inc(&ls->ls_stid.sc_count);
+       refcount_inc(&ls->ls_stid.sc_count);
        nfsd4_run_cb(&ls->ls_recall);
 
 out_unlock:
@@ -441,7 +441,7 @@ nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
                        goto done;
        }
 
-       atomic_inc(&ls->ls_stid.sc_count);
+       refcount_inc(&ls->ls_stid.sc_count);
        list_add_tail(&new->lo_perstate, &ls->ls_layouts);
        new = NULL;
 done:
index 8487486ec4963efb72477e7cf2f19616108f12f2..008ea0b627d02d5a06f8b3febb793627e11b70c3 100644 (file)
@@ -485,9 +485,6 @@ static __be32
 nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
            union nfsd4_op_u *u)
 {
-       if (!cstate->current_fh.fh_dentry)
-               return nfserr_nofilehandle;
-
        u->getfh = &cstate->current_fh;
        return nfs_ok;
 }
@@ -535,9 +532,6 @@ static __be32
 nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
             union nfsd4_op_u *u)
 {
-       if (!cstate->current_fh.fh_dentry)
-               return nfserr_nofilehandle;
-
        fh_dup2(&cstate->save_fh, &cstate->current_fh);
        if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) {
                memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t));
@@ -570,10 +564,11 @@ static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
 
        /*
         * This is opaque to client, so no need to byte-swap. Use
-        * __force to keep sparse happy
+        * __force to keep sparse happy. y2038 time_t overflow is
+        * irrelevant in this usage.
         */
        verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
-       verf[1] = (__force __be32)nn->nfssvc_boot.tv_usec;
+       verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec;
        memcpy(verifier->data, verf, sizeof(verifier->data));
 }
 
@@ -703,10 +698,8 @@ nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
           union nfsd4_op_u *u)
 {
        struct nfsd4_link *link = &u->link;
-       __be32 status = nfserr_nofilehandle;
+       __be32 status;
 
-       if (!cstate->save_fh.fh_dentry)
-               return status;
        status = nfsd_link(rqstp, &cstate->current_fh,
                           link->li_name, link->li_namelen, &cstate->save_fh);
        if (!status)
@@ -850,10 +843,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
             union nfsd4_op_u *u)
 {
        struct nfsd4_rename *rename = &u->rename;
-       __be32 status = nfserr_nofilehandle;
+       __be32 status;
 
-       if (!cstate->save_fh.fh_dentry)
-               return status;
        if (opens_in_grace(SVC_NET(rqstp)) &&
                !(cstate->save_fh.fh_export->ex_flags & NFSEXP_NOSUBTREECHECK))
                return nfserr_grace;
index 0c04f81aa63b225b2207b226b1113e1973ec1e1b..b29b5a185a2cb444f95fce2685381755d05d9429 100644 (file)
@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
 static const stateid_t currentstateid = {
        .si_generation = 1,
 };
+static const stateid_t close_stateid = {
+       .si_generation = 0xffffffffU,
+};
 
 static u64 current_sessionid = 1;
 
 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
+#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
 
 /* forward declarations */
 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
@@ -83,6 +87,11 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
  */
 static DEFINE_SPINLOCK(state_lock);
 
+enum nfsd4_st_mutex_lock_subclass {
+       OPEN_STATEID_MUTEX = 0,
+       LOCK_STATEID_MUTEX = 1,
+};
+
 /*
  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
  * the refcount on the open stateid to drop.
@@ -359,7 +368,7 @@ put_nfs4_file(struct nfs4_file *fi)
 {
        might_lock(&state_lock);
 
-       if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
+       if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
                hlist_del_rcu(&fi->fi_hash);
                spin_unlock(&state_lock);
                WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
@@ -568,7 +577,7 @@ alloc_clnt_odstate(struct nfs4_client *clp)
        co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
        if (co) {
                co->co_client = clp;
-               atomic_set(&co->co_odcount, 1);
+               refcount_set(&co->co_odcount, 1);
        }
        return co;
 }
@@ -586,7 +595,7 @@ static inline void
 get_clnt_odstate(struct nfs4_clnt_odstate *co)
 {
        if (co)
-               atomic_inc(&co->co_odcount);
+               refcount_inc(&co->co_odcount);
 }
 
 static void
@@ -598,7 +607,7 @@ put_clnt_odstate(struct nfs4_clnt_odstate *co)
                return;
 
        fp = co->co_file;
-       if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
+       if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
                list_del(&co->co_perfile);
                spin_unlock(&fp->fi_lock);
 
@@ -656,7 +665,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
        /* Will be incremented before return to client: */
-       atomic_set(&stid->sc_count, 1);
+       refcount_set(&stid->sc_count, 1);
        spin_lock_init(&stid->sc_lock);
 
        /*
@@ -813,7 +822,7 @@ nfs4_put_stid(struct nfs4_stid *s)
 
        might_lock(&clp->cl_lock);
 
-       if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
+       if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
                wake_up_all(&close_wq);
                return;
        }
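
The atomic_t to refcount_t conversions throughout nfs4state.c switch reference counting to the dedicated API in linux/refcount.h, which saturates instead of wrapping and warns on suspicious transitions such as incrementing from zero. A condensed get/put sketch with hypothetical obj names:

    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct obj {
            refcount_t              ref;
            struct hlist_node       hash;
    };

    static DEFINE_SPINLOCK(table_lock);

    static void obj_get(struct obj *o)
    {
            refcount_inc(&o->ref);                  /* warns if the count was 0 */
    }

    static void obj_put(struct obj *o)
    {
            /* drop the count; take the table lock only on the final put */
            if (refcount_dec_and_lock(&o->ref, &table_lock)) {
                    hlist_del(&o->hash);
                    spin_unlock(&table_lock);
                    kfree(o);
            }
    }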
@@ -913,7 +922,7 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
        if (status)
                return status;
        ++fp->fi_delegees;
-       atomic_inc(&dp->dl_stid.sc_count);
+       refcount_inc(&dp->dl_stid.sc_count);
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        list_add(&dp->dl_perclnt, &clp->cl_delegations);
@@ -1214,7 +1223,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
 
        WARN_ON_ONCE(!list_empty(&stp->st_locks));
 
-       if (!atomic_dec_and_test(&s->sc_count)) {
+       if (!refcount_dec_and_test(&s->sc_count)) {
                wake_up_all(&close_wq);
                return;
        }
@@ -1439,8 +1448,10 @@ free_session_slots(struct nfsd4_session *ses)
 {
        int i;
 
-       for (i = 0; i < ses->se_fchannel.maxreqs; i++)
+       for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
+               free_svc_cred(&ses->se_slots[i]->sl_cred);
                kfree(ses->se_slots[i]);
+       }
 }
 
 /*
@@ -1472,6 +1483,11 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
        spin_lock(&nfsd_drc_lock);
        avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
                    nfsd_drc_max_mem - nfsd_drc_mem_used);
+       /*
+        * Never use more than a third of the remaining memory,
+        * unless it's the only way to give this client a slot:
+        */
+       avail = clamp_t(int, avail, slotsize, avail/3);
        num = min_t(int, num, avail / slotsize);
        nfsd_drc_mem_used += num * slotsize;
        spin_unlock(&nfsd_drc_lock);
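
clamp_t(type, val, lo, hi) from linux/kernel.h casts all three arguments to the given type and bounds val to [lo, hi], so the added line caps each new session at roughly a third of the DRC memory still unused. A worked example with illustrative numbers:

    /* e.g. slotsize = 2048 bytes and 1 MiB of DRC memory still unused:
     *   avail = clamp_t(int, 1048576, 2048, 1048576 / 3)  ->  349525
     *   num   = 349525 / 2048                              ->     170 slots
     * i.e. a single client can no longer claim everything that is left.
     */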
@@ -2072,7 +2088,7 @@ find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
        s = find_stateid_locked(cl, t);
        if (s != NULL) {
                if (typemask & s->sc_type)
-                       atomic_inc(&s->sc_count);
+                       refcount_inc(&s->sc_count);
                else
                        s = NULL;
        }
@@ -2287,14 +2303,18 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
 
        dprintk("--> %s slot %p\n", __func__, slot);
 
+       slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
        slot->sl_opcnt = resp->opcnt;
        slot->sl_status = resp->cstate.status;
+       free_svc_cred(&slot->sl_cred);
+       copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
 
-       slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
-       if (nfsd4_not_cached(resp)) {
-               slot->sl_datalen = 0;
+       if (!nfsd4_cache_this(resp)) {
+               slot->sl_flags &= ~NFSD4_SLOT_CACHED;
                return;
        }
+       slot->sl_flags |= NFSD4_SLOT_CACHED;
+
        base = resp->cstate.data_offset;
        slot->sl_datalen = buf->len - base;
        if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
@@ -2321,8 +2341,16 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
        op = &args->ops[resp->opcnt - 1];
        nfsd4_encode_operation(resp, op);
 
-       /* Return nfserr_retry_uncached_rep in next operation. */
-       if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
+       if (slot->sl_flags & NFSD4_SLOT_CACHED)
+               return op->status;
+       if (args->opcnt == 1) {
+               /*
+                * The original operation wasn't a solo sequence--we
+                * always cache those--so this retry must not match the
+                * original:
+                */
+               op->status = nfserr_seq_false_retry;
+       } else {
                op = &args->ops[resp->opcnt++];
                op->status = nfserr_retry_uncached_rep;
                nfsd4_encode_operation(resp, op);
@@ -2986,6 +3014,34 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
        return xb->len > session->se_fchannel.maxreq_sz;
 }
 
+static bool replay_matches_cache(struct svc_rqst *rqstp,
+                struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
+{
+       struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+
+       if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
+           (bool)seq->cachethis)
+               return false;
+       /*
+        * If there's an error then the reply can have fewer ops than
+        * the call.  But if we cached a reply with *more* ops than the
+        * call you're sending us now, then this new call is clearly not
+        * really a replay of the old one:
+        */
+       if (slot->sl_opcnt < argp->opcnt)
+               return false;
+       /* This is the only check explicitly called by spec: */
+       if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
+               return false;
+       /*
+        * There may be more comparisons we could actually do, but the
+        * spec doesn't require us to catch every case where the calls
+        * don't match (that would require caching the call as well as
+        * the reply), so we don't bother.
+        */
+       return true;
+}
+
 __be32
 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                union nfsd4_op_u *u)
@@ -3045,6 +3101,9 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                status = nfserr_seq_misordered;
                if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
                        goto out_put_session;
+               status = nfserr_seq_false_retry;
+               if (!replay_matches_cache(rqstp, seq, slot))
+                       goto out_put_session;
                cstate->slot = slot;
                cstate->session = session;
                cstate->clp = clp;
@@ -3351,7 +3410,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
 {
        lockdep_assert_held(&state_lock);
 
-       atomic_set(&fp->fi_ref, 1);
+       refcount_set(&fp->fi_ref, 1);
        spin_lock_init(&fp->fi_lock);
        INIT_LIST_HEAD(&fp->fi_stateids);
        INIT_LIST_HEAD(&fp->fi_delegations);
@@ -3512,15 +3571,63 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
                /* ignore lock owners */
                if (local->st_stateowner->so_is_open_owner == 0)
                        continue;
-               if (local->st_stateowner == &oo->oo_owner) {
+               if (local->st_stateowner != &oo->oo_owner)
+                       continue;
+               if (local->st_stid.sc_type == NFS4_OPEN_STID) {
                        ret = local;
-                       atomic_inc(&ret->st_stid.sc_count);
+                       refcount_inc(&ret->st_stid.sc_count);
                        break;
                }
        }
        return ret;
 }
 
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+       __be32 ret = nfs_ok;
+
+       switch (s->sc_type) {
+       default:
+               break;
+       case NFS4_CLOSED_STID:
+       case NFS4_CLOSED_DELEG_STID:
+               ret = nfserr_bad_stateid;
+               break;
+       case NFS4_REVOKED_DELEG_STID:
+               ret = nfserr_deleg_revoked;
+       }
+       return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+       __be32 ret;
+
+       mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
+       ret = nfsd4_verify_open_stid(&stp->st_stid);
+       if (ret != nfs_ok)
+               mutex_unlock(&stp->st_mutex);
+       return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+       struct nfs4_ol_stateid *stp;
+       for (;;) {
+               spin_lock(&fp->fi_lock);
+               stp = nfsd4_find_existing_open(fp, open);
+               spin_unlock(&fp->fi_lock);
+               if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+                       break;
+               nfs4_put_stid(&stp->st_stid);
+       }
+       return stp;
+}
+
 static struct nfs4_openowner *
 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
                           struct nfsd4_compound_state *cstate)
@@ -3563,8 +3670,9 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
        stp = open->op_stp;
        /* We are moving these outside of the spinlocks to avoid the warnings */
        mutex_init(&stp->st_mutex);
-       mutex_lock(&stp->st_mutex);
+       mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
 
+retry:
        spin_lock(&oo->oo_owner.so_client->cl_lock);
        spin_lock(&fp->fi_lock);
 
@@ -3573,7 +3681,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
                goto out_unlock;
 
        open->op_stp = NULL;
-       atomic_inc(&stp->st_stid.sc_count);
+       refcount_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_OPEN_STID;
        INIT_LIST_HEAD(&stp->st_locks);
        stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
@@ -3589,7 +3697,11 @@ out_unlock:
        spin_unlock(&fp->fi_lock);
        spin_unlock(&oo->oo_owner.so_client->cl_lock);
        if (retstp) {
-               mutex_lock(&retstp->st_mutex);
+               /* Handle races with CLOSE */
+               if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+                       nfs4_put_stid(&retstp->st_stid);
+                       goto retry;
+               }
                /* To keep mutex tracking happy */
                mutex_unlock(&stp->st_mutex);
                stp = retstp;
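
Both of the loops introduced above (in nfsd4_find_and_lock_existing_open and the retry label in init_open_stateid) use the same discipline for taking a stateid's sleeping st_mutex after finding it under a spinlock: lock it, re-validate, and start over if a concurrent CLOSE got in first. A generic sketch of that lookup-then-lock pattern, with hypothetical obj, lookup_and_get and obj_put names:

    for (;;) {
            spin_lock(&table_lock);
            obj = lookup_and_get(key);      /* returns a referenced object or NULL */
            spin_unlock(&table_lock);
            if (!obj)
                    break;
            mutex_lock(&obj->mutex);
            if (obj->state != CLOSED)
                    break;                  /* locked and still valid */
            mutex_unlock(&obj->mutex);
            obj_put(obj);                   /* lost the race with teardown, retry */
    }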
@@ -3621,7 +3733,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
         * there should be no danger of the refcount going back up again at
         * this point.
         */
-       wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
+       wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
 
        release_all_access(s);
        if (s->st_stid.sc_file) {
@@ -3647,7 +3759,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
 
        hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
                if (fh_match(&fp->fi_fhandle, fh)) {
-                       if (atomic_inc_not_zero(&fp->fi_ref))
+                       if (refcount_inc_not_zero(&fp->fi_ref))
                                return fp;
                }
        }
@@ -3783,7 +3895,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
         * lock) we know the server hasn't removed the lease yet, we know
         * it's safe to take a reference.
         */
-       atomic_inc(&dp->dl_stid.sc_count);
+       refcount_inc(&dp->dl_stid.sc_count);
        nfsd4_run_cb(&dp->dl_recall);
 }
 
@@ -3966,7 +4078,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
 {
        struct nfs4_stid *ret;
 
-       ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
+       ret = find_stateid_by_type(cl, s,
+                               NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
        if (!ret)
                return NULL;
        return delegstateid(ret);
@@ -3989,6 +4102,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
        deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
        if (deleg == NULL)
                goto out;
+       if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
+               nfs4_put_stid(&deleg->dl_stid);
+               if (cl->cl_minorversion)
+                       status = nfserr_deleg_revoked;
+               goto out;
+       }
        flags = share_access_to_flags(open->op_share_access);
        status = nfs4_check_delegmode(deleg, flags);
        if (status) {
@@ -4392,6 +4511,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        struct nfs4_ol_stateid *stp = NULL;
        struct nfs4_delegation *dp = NULL;
        __be32 status;
+       bool new_stp = false;
 
        /*
         * Lookup file; if found, lookup stateid and check open request,
@@ -4403,9 +4523,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                status = nfs4_check_deleg(cl, open, &dp);
                if (status)
                        goto out;
-               spin_lock(&fp->fi_lock);
-               stp = nfsd4_find_existing_open(fp, open);
-               spin_unlock(&fp->fi_lock);
+               stp = nfsd4_find_and_lock_existing_open(fp, open);
        } else {
                open->op_file = NULL;
                status = nfserr_bad_stateid;
@@ -4413,35 +4531,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                        goto out;
        }
 
+       if (!stp) {
+               stp = init_open_stateid(fp, open);
+               if (!open->op_stp)
+                       new_stp = true;
+       }
+
        /*
         * OPEN the file, or upgrade an existing OPEN.
         * If truncate fails, the OPEN fails.
+        *
+        * stp is already locked.
         */
-       if (stp) {
+       if (!new_stp) {
                /* Stateid was found, this is an OPEN upgrade */
-               mutex_lock(&stp->st_mutex);
                status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
                if (status) {
                        mutex_unlock(&stp->st_mutex);
                        goto out;
                }
        } else {
-               /* stp is returned locked. */
-               stp = init_open_stateid(fp, open);
-               /* See if we lost the race to some other thread */
-               if (stp->st_access_bmap != 0) {
-                       status = nfs4_upgrade_open(rqstp, fp, current_fh,
-                                               stp, open);
-                       if (status) {
-                               mutex_unlock(&stp->st_mutex);
-                               goto out;
-                       }
-                       goto upgrade_out;
-               }
                status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       mutex_unlock(&stp->st_mutex);
+                       stp->st_stid.sc_type = NFS4_CLOSED_STID;
                        release_open_stateid(stp);
+                       mutex_unlock(&stp->st_mutex);
                        goto out;
                }
 
@@ -4450,7 +4564,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                if (stp->st_clnt_odstate == open->op_odstate)
                        open->op_odstate = NULL;
        }
-upgrade_out:
+
        nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
        mutex_unlock(&stp->st_mutex);
 
@@ -4677,7 +4791,7 @@ nfs4_laundromat(struct nfsd_net *nn)
        spin_unlock(&nn->blocked_locks_lock);
 
        while (!list_empty(&reaplist)) {
-               nbl = list_first_entry(&nn->blocked_locks_lru,
+               nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
                posix_unblock_lock(&nbl->nbl_lock);
@@ -4798,6 +4912,18 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
        return nfserr_old_stateid;
 }
 
+static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
+{
+       __be32 ret;
+
+       spin_lock(&s->sc_lock);
+       ret = nfsd4_verify_open_stid(s);
+       if (ret == nfs_ok)
+               ret = check_stateid_generation(in, &s->sc_stateid, has_session);
+       spin_unlock(&s->sc_lock);
+       return ret;
+}
+
 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
 {
        if (ols->st_stateowner->so_is_open_owner &&
@@ -4811,7 +4937,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
        struct nfs4_stid *s;
        __be32 status = nfserr_bad_stateid;
 
-       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+               CLOSE_STATEID(stateid))
                return status;
        /* Client debugging aid. */
        if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
@@ -4826,7 +4953,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
        s = find_stateid_locked(cl, stateid);
        if (!s)
                goto out_unlock;
-       status = check_stateid_generation(stateid, &s->sc_stateid, 1);
+       status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
        if (status)
                goto out_unlock;
        switch (s->sc_type) {
@@ -4858,8 +4985,19 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
                     struct nfs4_stid **s, struct nfsd_net *nn)
 {
        __be32 status;
+       bool return_revoked = false;
 
-       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+       /*
+        *  only return revoked delegations if explicitly asked.
+        *  otherwise we report revoked or bad_stateid status.
+        */
+       if (typemask & NFS4_REVOKED_DELEG_STID)
+               return_revoked = true;
+       else if (typemask & NFS4_DELEG_STID)
+               typemask |= NFS4_REVOKED_DELEG_STID;
+
+       if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+               CLOSE_STATEID(stateid))
                return nfserr_bad_stateid;
        status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
        if (status == nfserr_stale_clientid) {
@@ -4872,6 +5010,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
        *s = find_stateid_by_type(cstate->clp, stateid, typemask);
        if (!*s)
                return nfserr_bad_stateid;
+       if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
+               nfs4_put_stid(*s);
+               if (cstate->minorversion)
+                       return nfserr_deleg_revoked;
+               return nfserr_bad_stateid;
+       }
        return nfs_ok;
 }
 
@@ -4971,7 +5115,7 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
                                &s, nn);
        if (status)
                return status;
-       status = check_stateid_generation(stateid, &s->sc_stateid,
+       status = nfsd4_stid_check_stateid_generation(stateid, s,
                        nfsd4_has_session(cstate));
        if (status)
                goto out;
@@ -5025,7 +5169,9 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
        struct nfs4_ol_stateid *stp = openlockstateid(s);
        __be32 ret;
 
-       mutex_lock(&stp->st_mutex);
+       ret = nfsd4_lock_ol_stateid(stp);
+       if (ret)
+               goto out_put_stid;
 
        ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
        if (ret)
@@ -5036,11 +5182,13 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
                            lockowner(stp->st_stateowner)))
                goto out;
 
+       stp->st_stid.sc_type = NFS4_CLOSED_STID;
        release_lock_stateid(stp);
        ret = nfs_ok;
 
 out:
        mutex_unlock(&stp->st_mutex);
+out_put_stid:
        nfs4_put_stid(s);
        return ret;
 }
@@ -5060,6 +5208,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        s = find_stateid_locked(cl, stateid);
        if (!s)
                goto out_unlock;
+       spin_lock(&s->sc_lock);
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
                ret = nfserr_locks_held;
@@ -5071,11 +5220,13 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                ret = nfserr_locks_held;
                break;
        case NFS4_LOCK_STID:
-               atomic_inc(&s->sc_count);
+               spin_unlock(&s->sc_lock);
+               refcount_inc(&s->sc_count);
                spin_unlock(&cl->cl_lock);
                ret = nfsd4_free_lock_stateid(stateid, s);
                goto out;
        case NFS4_REVOKED_DELEG_STID:
+               spin_unlock(&s->sc_lock);
                dp = delegstateid(s);
                list_del_init(&dp->dl_recall_lru);
                spin_unlock(&cl->cl_lock);
@@ -5084,6 +5235,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                goto out;
        /* Default falls through and returns nfserr_bad_stateid */
        }
+       spin_unlock(&s->sc_lock);
 out_unlock:
        spin_unlock(&cl->cl_lock);
 out:
@@ -5106,15 +5258,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
        status = nfsd4_check_seqid(cstate, sop, seqid);
        if (status)
                return status;
-       if (stp->st_stid.sc_type == NFS4_CLOSED_STID
-               || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
-               /*
-                * "Closed" stateid's exist *only* to return
-                * nfserr_replay_me from the previous step, and
-                * revoked delegations are kept only for free_stateid.
-                */
-               return nfserr_bad_stateid;
-       mutex_lock(&stp->st_mutex);
+       status = nfsd4_lock_ol_stateid(stp);
+       if (status != nfs_ok)
+               return status;
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status == nfs_ok)
                status = nfs4_check_fh(current_fh, &stp->st_stid);
@@ -5294,7 +5440,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
        bool unhashed;
        LIST_HEAD(reaplist);
 
-       s->st_stid.sc_type = NFS4_CLOSED_STID;
        spin_lock(&clp->cl_lock);
        unhashed = unhash_open_stateid(s, &reaplist);
 
@@ -5334,10 +5479,17 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        nfsd4_bump_seqid(cstate, status);
        if (status)
                goto out; 
+
+       stp->st_stid.sc_type = NFS4_CLOSED_STID;
        nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-       mutex_unlock(&stp->st_mutex);
 
        nfsd4_close_open_stateid(stp);
+       mutex_unlock(&stp->st_mutex);
+
+       /* See RFC5661 section 18.2.4 */
+       if (stp->st_stid.sc_client->cl_minorversion)
+               memcpy(&close->cl_stateid, &close_stateid,
+                               sizeof(close->cl_stateid));
 
        /* put reference from nfs4_preprocess_seqid_op */
        nfs4_put_stid(&stp->st_stid);
@@ -5363,7 +5515,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (status)
                goto out;
        dp = delegstateid(s);
-       status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
+       status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
        if (status)
                goto put_stateid;
 
@@ -5569,16 +5721,43 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
        return ret;
 }
 
-static void
+static struct nfs4_ol_stateid *
+find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
+{
+       struct nfs4_ol_stateid *lst;
+       struct nfs4_client *clp = lo->lo_owner.so_client;
+
+       lockdep_assert_held(&clp->cl_lock);
+
+       list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
+               if (lst->st_stid.sc_type != NFS4_LOCK_STID)
+                       continue;
+               if (lst->st_stid.sc_file == fp) {
+                       refcount_inc(&lst->st_stid.sc_count);
+                       return lst;
+               }
+       }
+       return NULL;
+}
+
+static struct nfs4_ol_stateid *
 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
                  struct nfs4_file *fp, struct inode *inode,
                  struct nfs4_ol_stateid *open_stp)
 {
        struct nfs4_client *clp = lo->lo_owner.so_client;
+       struct nfs4_ol_stateid *retstp;
 
-       lockdep_assert_held(&clp->cl_lock);
+       mutex_init(&stp->st_mutex);
+       mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+retry:
+       spin_lock(&clp->cl_lock);
+       spin_lock(&fp->fi_lock);
+       retstp = find_lock_stateid(lo, fp);
+       if (retstp)
+               goto out_unlock;
 
-       atomic_inc(&stp->st_stid.sc_count);
+       refcount_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_LOCK_STID;
        stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
        get_nfs4_file(fp);
@@ -5586,29 +5765,22 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
-       mutex_init(&stp->st_mutex);
        list_add(&stp->st_locks, &open_stp->st_locks);
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
-       spin_lock(&fp->fi_lock);
        list_add(&stp->st_perfile, &fp->fi_stateids);
+out_unlock:
        spin_unlock(&fp->fi_lock);
-}
-
-static struct nfs4_ol_stateid *
-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
-{
-       struct nfs4_ol_stateid *lst;
-       struct nfs4_client *clp = lo->lo_owner.so_client;
-
-       lockdep_assert_held(&clp->cl_lock);
-
-       list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
-               if (lst->st_stid.sc_file == fp) {
-                       atomic_inc(&lst->st_stid.sc_count);
-                       return lst;
+       spin_unlock(&clp->cl_lock);
+       if (retstp) {
+               if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+                       nfs4_put_stid(&retstp->st_stid);
+                       goto retry;
                }
+               /* To keep mutex tracking happy */
+               mutex_unlock(&stp->st_mutex);
+               stp = retstp;
        }
-       return NULL;
+       return stp;
 }
 
 static struct nfs4_ol_stateid *
@@ -5621,26 +5793,25 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
        struct nfs4_openowner *oo = openowner(ost->st_stateowner);
        struct nfs4_client *clp = oo->oo_owner.so_client;
 
+       *new = false;
        spin_lock(&clp->cl_lock);
        lst = find_lock_stateid(lo, fi);
-       if (lst == NULL) {
-               spin_unlock(&clp->cl_lock);
-               ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
-               if (ns == NULL)
-                       return NULL;
-
-               spin_lock(&clp->cl_lock);
-               lst = find_lock_stateid(lo, fi);
-               if (likely(!lst)) {
-                       lst = openlockstateid(ns);
-                       init_lock_stateid(lst, lo, fi, inode, ost);
-                       ns = NULL;
-                       *new = true;
-               }
-       }
        spin_unlock(&clp->cl_lock);
-       if (ns)
+       if (lst != NULL) {
+               if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
+                       goto out;
+               nfs4_put_stid(&lst->st_stid);
+       }
+       ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
+       if (ns == NULL)
+               return NULL;
+
+       lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
+       if (lst == openlockstateid(ns))
+               *new = true;
+       else
                nfs4_put_stid(ns);
+out:
        return lst;
 }
 
@@ -5677,7 +5848,6 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
        struct nfs4_lockowner *lo;
        struct nfs4_ol_stateid *lst;
        unsigned int strhashval;
-       bool hashed;
 
        lo = find_lockowner_str(cl, &lock->lk_new_owner);
        if (!lo) {
@@ -5693,25 +5863,12 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
                        goto out;
        }
 
-retry:
        lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
        if (lst == NULL) {
                status = nfserr_jukebox;
                goto out;
        }
 
-       mutex_lock(&lst->st_mutex);
-
-       /* See if it's still hashed to avoid race with FREE_STATEID */
-       spin_lock(&cl->cl_lock);
-       hashed = !list_empty(&lst->st_perfile);
-       spin_unlock(&cl->cl_lock);
-
-       if (!hashed) {
-               mutex_unlock(&lst->st_mutex);
-               nfs4_put_stid(&lst->st_stid);
-               goto retry;
-       }
        status = nfs_ok;
        *plst = lst;
 out:
@@ -5917,14 +6074,16 @@ out:
                    seqid_mutating_err(ntohl(status)))
                        lock_sop->lo_owner.so_seqid++;
 
-               mutex_unlock(&lock_stp->st_mutex);
-
                /*
                 * If this is a new, never-before-used stateid, and we are
                 * returning an error, then just go ahead and release it.
                 */
-               if (status && new)
+               if (status && new) {
+                       lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
                        release_lock_stateid(lock_stp);
+               }
+
+               mutex_unlock(&lock_stp->st_mutex);
 
                nfs4_put_stid(&lock_stp->st_stid);
        }
@@ -6944,6 +7103,10 @@ static int nfs4_state_create_net(struct net *net)
                INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
        nn->conf_name_tree = RB_ROOT;
        nn->unconf_name_tree = RB_ROOT;
+       nn->boot_time = get_seconds();
+       nn->grace_ended = false;
+       nn->nfsd4_manager.block_opens = true;
+       INIT_LIST_HEAD(&nn->nfsd4_manager.list);
        INIT_LIST_HEAD(&nn->client_lru);
        INIT_LIST_HEAD(&nn->close_lru);
        INIT_LIST_HEAD(&nn->del_recall_lru);
@@ -7001,13 +7164,10 @@ nfs4_state_start_net(struct net *net)
        ret = nfs4_state_create_net(net);
        if (ret)
                return ret;
-       nn->boot_time = get_seconds();
-       nn->grace_ended = false;
-       nn->nfsd4_manager.block_opens = true;
        locks_start_grace(net, &nn->nfsd4_manager);
        nfsd4_client_tracking_init(net);
-       printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
-              nn->nfsd4_grace, net);
+       printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
+              nn->nfsd4_grace, net->ns.inum);
        queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
        return 0;
 }
@@ -7080,7 +7240,7 @@ nfs4_state_shutdown_net(struct net *net)
        spin_unlock(&nn->blocked_locks_lock);
 
        while (!list_empty(&reaplist)) {
-               nbl = list_first_entry(&nn->blocked_locks_lru,
+               nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
                posix_unblock_lock(&nbl->nbl_lock);
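
This hunk and the matching one in nfs4_laundromat fix the same slip: once the expired entries have been moved onto the local reaplist, the drain loop has to take them from the reaplist, not from the still-live blocked_locks_lru. The usual shape of the pattern, sketched with a hypothetical struct foo and free_foo:

    LIST_HEAD(reaplist);
    struct foo *entry;

    spin_lock(&lru_lock);
    list_splice_init(&lru, &reaplist);      /* detach everything under the lock */
    spin_unlock(&lru_lock);

    while (!list_empty(&reaplist)) {
            entry = list_first_entry(&reaplist, struct foo, lru);
            list_del_init(&entry->lru);
            free_foo(entry);                /* may sleep: done without the lock */
    }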
index 6493df6b1bd5f192646d1f63b2230af840833651..d107b4426f7eb15443188e8538c4b6e8e99d4fa8 100644 (file)
@@ -1241,6 +1241,9 @@ static __net_init int nfsd_init_net(struct net *net)
        nn->nfsd4_grace = 90;
        nn->clverifier_counter = prandom_u32();
        nn->clientid_counter = prandom_u32();
+
+       atomic_set(&nn->ntf_refcnt, 0);
+       init_waitqueue_head(&nn->ntf_wq);
        return 0;
 
 out_idmap_error:
index e02bd278312463af174de08b017c445eb4e86b5b..89cb484f1cfbeccde41d872298c7e08aaadd5b87 100644 (file)
@@ -335,7 +335,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct sockaddr_in sin;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nn->ntf_refcnt))
                goto out;
 
        if (nn->nfsd_serv) {
@@ -344,6 +345,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
                sin.sin_addr.s_addr = ifa->ifa_local;
                svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
        }
+       atomic_dec(&nn->ntf_refcnt);
+       wake_up(&nn->ntf_wq);
 
 out:
        return NOTIFY_DONE;
@@ -363,7 +366,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct sockaddr_in6 sin6;
 
-       if (event != NETDEV_DOWN)
+       if ((event != NETDEV_DOWN) ||
+           !atomic_inc_not_zero(&nn->ntf_refcnt))
                goto out;
 
        if (nn->nfsd_serv) {
@@ -374,7 +378,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
                        sin6.sin6_scope_id = ifa->idev->dev->ifindex;
                svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
        }
-
+       atomic_dec(&nn->ntf_refcnt);
+       wake_up(&nn->ntf_wq);
 out:
        return NOTIFY_DONE;
 }
@@ -391,6 +396,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
+       atomic_dec(&nn->ntf_refcnt);
        /* check if the notifier still has clients */
        if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
                unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -398,6 +404,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
                unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
        }
+       wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);
 
        /*
         * write_ports can create the server without actually starting
@@ -447,7 +454,7 @@ void nfsd_reset_versions(void)
  */
 static void set_max_drc(void)
 {
-       #define NFSD_DRC_SIZE_SHIFT     10
+       #define NFSD_DRC_SIZE_SHIFT     7
        nfsd_drc_max_mem = (nr_free_buffer_pages()
                                        >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
        nfsd_drc_mem_used = 0;
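
Lowering NFSD_DRC_SIZE_SHIFT from 10 to 7 raises the duplicate-reply-cache ceiling from 1/1024 to 1/128 of the free buffer pages. Plugging sample numbers into the formula right above:

    /* with 1048576 free 4 KiB buffer pages (4 GiB of buffer memory):
     *   shift 10: (1048576 >> 10) * 4096 =  4 MiB allowed for the DRC
     *   shift  7: (1048576 >>  7) * 4096 = 32 MiB allowed for the DRC
     */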
@@ -517,7 +524,8 @@ int nfsd_create_serv(struct net *net)
                register_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
        }
-       do_gettimeofday(&nn->nfssvc_boot);              /* record boot time */
+       atomic_inc(&nn->ntf_refcnt);
+       ktime_get_real_ts64(&nn->nfssvc_boot); /* record boot time */
        return 0;
 }
 
index 005c911b34ac4553a2c02da05b4e5d975b660710..f3772ea8ba0d394f95c302584093d57fc19e37d7 100644 (file)
@@ -36,6 +36,7 @@
 #define _NFSD4_STATE_H
 
 #include <linux/idr.h>
+#include <linux/refcount.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include "nfsfh.h"
 
@@ -83,7 +84,7 @@ struct nfsd4_callback_ops {
  * fields that are of general use to any stateid.
  */
 struct nfs4_stid {
-       atomic_t                sc_count;
+       refcount_t              sc_count;
 #define NFS4_OPEN_STID 1
 #define NFS4_LOCK_STID 2
 #define NFS4_DELEG_STID 4
@@ -169,11 +170,13 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
 struct nfsd4_slot {
        u32     sl_seqid;
        __be32  sl_status;
+       struct svc_cred sl_cred;
        u32     sl_datalen;
        u16     sl_opcnt;
 #define NFSD4_SLOT_INUSE       (1 << 0)
 #define NFSD4_SLOT_CACHETHIS   (1 << 1)
 #define NFSD4_SLOT_INITIALIZED (1 << 2)
+#define NFSD4_SLOT_CACHED      (1 << 3)
        u8      sl_flags;
        char    sl_data[];
 };
@@ -465,7 +468,7 @@ struct nfs4_clnt_odstate {
        struct nfs4_client      *co_client;
        struct nfs4_file        *co_file;
        struct list_head        co_perfile;
-       atomic_t                co_odcount;
+       refcount_t              co_odcount;
 };
 
 /*
@@ -481,7 +484,7 @@ struct nfs4_clnt_odstate {
  * the global state_lock spinlock.
  */
 struct nfs4_file {
-       atomic_t                fi_ref;
+       refcount_t              fi_ref;
        spinlock_t              fi_lock;
        struct hlist_node       fi_hash;        /* hash on fi_fhandle */
        struct list_head        fi_stateids;
@@ -634,7 +637,7 @@ struct nfs4_file *find_file(struct knfsd_fh *fh);
 void put_nfs4_file(struct nfs4_file *fi);
 static inline void get_nfs4_file(struct nfs4_file *fi)
 {
-       atomic_inc(&fi->fi_ref);
+       refcount_inc(&fi->fi_ref);
 }
 struct file *find_any_file(struct nfs4_file *f);
 
index 1e4edbf70052bf5f6c8c0af06e0d7bf1812e80a5..bc29511b6405275522a09db2c596991cdd99e710 100644 (file)
@@ -649,9 +649,18 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
        return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
 }
 
-static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
+/*
+ * The session reply cache only needs to cache replies that the client
+ * actually asked us to.  But it's almost free for us to cache compounds
+ * consisting of only a SEQUENCE op, so we may as well cache those too.
+ * Also, the protocol doesn't give us a convenient response in the case
+ * of a replay of a solo SEQUENCE op that wasn't cached
+ * (RETRY_UNCACHED_REP can only be returned in the second op of a
+ * compound).
+ */
+static inline bool nfsd4_cache_this(struct nfsd4_compoundres *resp)
 {
-       return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
+       return (resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
                || nfsd4_is_solo_sequence(resp);
 }
 
index 515d13c196daf81f69dc0b5501b4b9780c8d2743..1a2894aa01942597e05dba587d33d42bd2da7c0a 100644 (file)
@@ -150,7 +150,7 @@ static int nilfs_symlink(struct inode *dir, struct dentry *dentry,
        if (err)
                return err;
 
-       inode = nilfs_new_inode(dir, S_IFLNK | S_IRWXUGO);
+       inode = nilfs_new_inode(dir, S_IFLNK | 0777);
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
                goto out;
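
The S_IRWXUGO to 0777 change is purely cosmetic: S_IRWXUGO expands to (S_IRWXU | S_IRWXG | S_IRWXO) == 0777, and checkpatch now prefers plain octal literals for mode bits. The two spellings produce the same mode:

    inode = nilfs_new_inode(dir, S_IFLNK | S_IRWXUGO);      /* old spelling        */
    inode = nilfs_new_inode(dir, S_IFLNK | 0777);           /* same value, 0120777 */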
index f65392fecb5c7f712e8262095aa8261577d035de..9f3ffba41533ebf610b0a8649f31892ebab95be3 100644 (file)
@@ -1954,8 +1954,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
                                          err, ii->vfs_inode.i_ino);
                                return err;
                        }
-                       mark_buffer_dirty(ibh);
-                       nilfs_mdt_mark_dirty(ifile);
                        spin_lock(&nilfs->ns_inode_lock);
                        if (likely(!ii->i_bh))
                                ii->i_bh = ibh;
@@ -1964,6 +1962,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
                        goto retry;
                }
 
+               // Always redirty the buffer to avoid race condition
+               mark_buffer_dirty(ii->i_bh);
+               nilfs_mdt_mark_dirty(ifile);
+
                clear_bit(NILFS_I_QUEUED, &ii->i_state);
                set_bit(NILFS_I_BUSY, &ii->i_state);
                list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
@@ -1977,7 +1979,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
 {
        struct nilfs_inode_info *ii, *n;
-       int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+       int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
        int defer_iput = false;
 
        spin_lock(&nilfs->ns_inode_lock);
@@ -2400,11 +2402,11 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
        return err;
 }
 
-static void nilfs_construction_timeout(unsigned long data)
+static void nilfs_construction_timeout(struct timer_list *t)
 {
-       struct task_struct *p = (struct task_struct *)data;
+       struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
 
-       wake_up_process(p);
+       wake_up_process(sci->sc_timer_task);
 }
 
 static void
@@ -2542,8 +2544,7 @@ static int nilfs_segctor_thread(void *arg)
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int timeout = 0;
 
-       sci->sc_timer.data = (unsigned long)current;
-       sci->sc_timer.function = nilfs_construction_timeout;
+       sci->sc_timer_task = current;
 
        /* start sync. */
        sci->sc_task = current;
@@ -2674,7 +2675,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
        INIT_LIST_HEAD(&sci->sc_gc_inodes);
        INIT_LIST_HEAD(&sci->sc_iput_queue);
        INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
-       init_timer(&sci->sc_timer);
+       timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
 
        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
        sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
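
This is the timer-API conversion done across the tree for 4.15: instead of stashing a task pointer in timer.data, the callback now receives the struct timer_list itself and recovers its container with from_timer(), while setup goes through timer_setup(). A self-contained sketch of the same pattern with hypothetical my_ctx names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>

    struct my_ctx {
            struct timer_list       timer;
            struct task_struct      *task;
    };

    static void my_timeout(struct timer_list *t)
    {
            struct my_ctx *ctx = from_timer(ctx, t, timer); /* container_of() helper */

            wake_up_process(ctx->task);
    }

    static void my_ctx_start(struct my_ctx *ctx)
    {
            ctx->task = current;
            timer_setup(&ctx->timer, my_timeout, 0);
            mod_timer(&ctx->timer, jiffies + HZ);           /* fire in one second */
    }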
index 1060949d7dd2a6119a7cd814590b698646ae33a7..84084a4d9b3e83a498fc2ea21bb2d59fb7928d9e 100644 (file)
@@ -180,6 +180,7 @@ struct nilfs_sc_info {
        unsigned long           sc_watermark;
 
        struct timer_list       sc_timer;
+       struct task_struct     *sc_timer_task;
        struct task_struct     *sc_task;
 };
 
index 1541a1e9221a5caeabc56423cebee6a4e084a516..1341a41e7b43aece4c05a468568b251d4f12b666 100644 (file)
@@ -630,22 +630,22 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
 }
 
 /**
- * nilfs_sufile_truncate_range - truncate range of segment array
- * @sufile: inode of segment usage file
- * @start: start segment number (inclusive)
- * @end: end segment number (inclusive)
- *
- * Return Value: On success, 0 is returned.  On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid number of segments specified
- *
- * %-EBUSY - Dirty or active segments are present in the range
- */
+ * nilfs_sufile_truncate_range - truncate range of segment array
+ * @sufile: inode of segment usage file
+ * @start: start segment number (inclusive)
+ * @end: end segment number (inclusive)
+ *
+ * Return Value: On success, 0 is returned.  On error, one of the
+ * following negative error codes is returned.
+ *
+ * %-EIO - I/O error.
+ *
+ * %-ENOMEM - Insufficient amount of memory available.
+ *
+ * %-EINVAL - Invalid number of segments specified
+ *
+ * %-EBUSY - Dirty or active segments are present in the range
+ */
 static int nilfs_sufile_truncate_range(struct inode *sufile,
                                       __u64 start, __u64 end)
 {
index 4fc018dfcfae354b598d2a547b96adf1ae1c76c0..3073b646e1bacf7c33aa8fd458ca8dfd7cb30884 100644 (file)
@@ -141,7 +141,7 @@ void __nilfs_error(struct super_block *sb, const char *function,
 
                if (nilfs_test_opt(nilfs, ERRORS_RO)) {
                        printk(KERN_CRIT "Remounting filesystem read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
        }
 
@@ -160,7 +160,6 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
        ii->i_bh = NULL;
        ii->i_state = 0;
        ii->i_cno = 0;
-       ii->vfs_inode.i_version = 1;
        nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
        return &ii->vfs_inode;
 }
@@ -870,7 +869,7 @@ int nilfs_store_magic_and_option(struct super_block *sb,
 
        /* FS independent flags */
 #ifdef NILFS_ATIME_DISABLE
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
 #endif
 
        nilfs_set_default_options(sb, sbp);
@@ -1134,7 +1133,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                err = -EINVAL;
                goto restore_opts;
        }
-       sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
 
        err = -EINVAL;
 
@@ -1144,12 +1143,12 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out;
-       if (*flags & MS_RDONLY) {
+       if (*flags & SB_RDONLY) {
                /* Shutting down log writer */
                nilfs_detach_log_writer(sb);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
 
                /*
                 * Remounting a valid RW partition RDONLY, so set
@@ -1179,7 +1178,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                        goto restore_opts;
                }
 
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 
                root = NILFS_I(d_inode(sb->s_root))->i_root;
                err = nilfs_attach_log_writer(sb, root);
@@ -1213,7 +1212,7 @@ static int nilfs_parse_snapshot_option(const char *option,
        const char *msg = NULL;
        int err;
 
-       if (!(sd->flags & MS_RDONLY)) {
+       if (!(sd->flags & SB_RDONLY)) {
                msg = "read-only option is not specified";
                goto parse_error;
        }
@@ -1287,7 +1286,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
        struct dentry *root_dentry;
        int err, s_new = false;
 
-       if (!(flags & MS_RDONLY))
+       if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;
 
        sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
@@ -1328,14 +1327,14 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
                snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
                sb_set_blocksize(s, block_size(sd.bdev));
 
-               err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
+               err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0);
                if (err)
                        goto failed_super;
 
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
        } else if (!sd.cno) {
                if (nilfs_tree_is_busy(s->s_root)) {
-                       if ((flags ^ s->s_flags) & MS_RDONLY) {
+                       if ((flags ^ s->s_flags) & SB_RDONLY) {
                                nilfs_msg(s, KERN_ERR,
                                          "the device already has a %s mount.",
                                          sb_rdonly(s) ? "read-only" : "read/write");
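
Note on the flag rename seen throughout the hunks above and below: assignments to sb->s_flags are switched from the mount-time MS_* constants to the superblock-scoped SB_* names. At this point the two sets share the same numeric values, so the change is a spelling/namespacing cleanup rather than a behavioural one. A minimal sketch of the resulting idiom, assuming nothing beyond <linux/fs.h> (example_force_readonly() is an illustrative name, not a function from this merge):

    #include <linux/fs.h>

    /* Force a superblock read-only, using the SB_* spelling of the flag. */
    static void example_force_readonly(struct super_block *sb)
    {
            if (!sb_rdonly(sb))
                    sb->s_flags |= SB_RDONLY;       /* was: MS_RDONLY */
    }
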
index 2dd75bf619ad0e1167a62e90ef59258d2ee88e26..1a85317e83f0f751332118daf559fc9ba79fb560 100644 (file)
@@ -220,7 +220,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
 
        if (!valid_fs) {
                nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
-               if (s_flags & MS_RDONLY) {
+               if (s_flags & SB_RDONLY) {
                        nilfs_msg(sb, KERN_INFO,
                                  "recovery required for readonly filesystem");
                        nilfs_msg(sb, KERN_INFO,
@@ -286,7 +286,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
        if (valid_fs)
                goto skip_recovery;
 
-       if (s_flags & MS_RDONLY) {
+       if (s_flags & SB_RDONLY) {
                __u64 features;
 
                if (nilfs_test_opt(nilfs, NORECOVERY)) {
@@ -309,7 +309,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
                        err = -EROFS;
                        goto failed_unload;
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
        } else if (nilfs_test_opt(nilfs, NORECOVERY)) {
                nilfs_msg(sb, KERN_ERR,
                          "recovery cancelled because norecovery option was specified for a read/write mount");
@@ -737,7 +737,7 @@ struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno)
                } else if (cno > root->cno) {
                        n = n->rb_right;
                } else {
-                       atomic_inc(&root->count);
+                       refcount_inc(&root->count);
                        spin_unlock(&nilfs->ns_cptree_lock);
                        return root;
                }
@@ -776,7 +776,7 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
                } else if (cno > root->cno) {
                        p = &(*p)->rb_right;
                } else {
-                       atomic_inc(&root->count);
+                       refcount_inc(&root->count);
                        spin_unlock(&nilfs->ns_cptree_lock);
                        kfree(new);
                        return root;
@@ -786,7 +786,7 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
        new->cno = cno;
        new->ifile = NULL;
        new->nilfs = nilfs;
-       atomic_set(&new->count, 1);
+       refcount_set(&new->count, 1);
        atomic64_set(&new->inodes_count, 0);
        atomic64_set(&new->blocks_count, 0);
 
@@ -806,7 +806,7 @@ nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
 
 void nilfs_put_root(struct nilfs_root *root)
 {
-       if (atomic_dec_and_test(&root->count)) {
+       if (refcount_dec_and_test(&root->count)) {
                struct the_nilfs *nilfs = root->nilfs;
 
                nilfs_sysfs_delete_snapshot_group(root);
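
The nilfs_root reference count in the hunks above moves from atomic_t to refcount_t. Compared with a bare atomic_t, refcount_t saturates and WARNs on overflow or on incrementing a counter that has already reached zero. A short sketch of the general get/put pattern, using an illustrative example_obj type (none of these names come from the patch):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct example_obj {
            refcount_t count;
    };

    static void example_obj_init(struct example_obj *obj)
    {
            refcount_set(&obj->count, 1);           /* creator holds one reference */
    }

    static void example_obj_get(struct example_obj *obj)
    {
            refcount_inc(&obj->count);              /* saturates instead of wrapping */
    }

    static void example_obj_put(struct example_obj *obj)
    {
            if (refcount_dec_and_test(&obj->count))
                    kfree(obj);
    }
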
index b305c6f033e7c46ea11cbf7481135375b38f2d9b..883d732b02595012212b05efea83af61556c8bd2 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/slab.h>
+#include <linux/refcount.h>
 
 struct nilfs_sc_info;
 struct nilfs_sysfs_dev_subgroups;
@@ -246,7 +247,7 @@ struct nilfs_root {
        __u64 cno;
        struct rb_node rb_node;
 
-       atomic_t count;
+       refcount_t count;
        struct the_nilfs *nilfs;
        struct inode *ifile;
 
@@ -299,7 +300,7 @@ void nilfs_swap_super_block(struct the_nilfs *);
 
 static inline void nilfs_get_root(struct nilfs_root *root)
 {
-       atomic_inc(&root->count);
+       refcount_inc(&root->count);
 }
 
 static inline int nilfs_valid_fs(struct the_nilfs *nilfs)
index 81d8959b6aef9c6595509b35efaede5ef1ebf3b5..219b269c737e6e0d85f05fb142e712c918d34dfc 100644 (file)
@@ -67,7 +67,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
 
                /*
                 * If i_count is zero, the inode cannot have any watches and
-                * doing an __iget/iput with MS_ACTIVE clear would actually
+                * doing an __iget/iput with SB_ACTIVE clear would actually
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
                 */
index ef243e14b6ebd2d8c57113ced959fb2f2a772a6f..7c6f76d29f5649bfcfa446b51c88ceaffe9e356a 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -255,5 +255,5 @@ void __init nsfs_init(void)
        nsfs_mnt = kern_mount(&nsfs);
        if (IS_ERR(nsfs_mnt))
                panic("can't set nsfs up\n");
-       nsfs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+       nsfs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
 }
index 3f70f041dbe9d7e0f79b5970da1d7b5d14ca584e..bb7159f697f2f3f45713ef26a327b1339f1a30bc 100644 (file)
@@ -473,7 +473,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
 
 #ifndef NTFS_RW
        /* For read-only compiled driver, enforce read-only flag. */
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
 #else /* NTFS_RW */
        /*
         * For the read-write compiled driver, if we are remounting read-write,
@@ -487,7 +487,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
         * When remounting read-only, mark the volume clean if no volume errors
         * have occurred.
         */
-       if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+       if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
                static const char *es = ".  Cannot remount read-write.";
 
                /* Remounting read-write. */
@@ -548,7 +548,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
                        NVolSetErrors(vol);
                        return -EROFS;
                }
-       } else if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+       } else if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
                /* Remounting read-only. */
                if (!NVolErrors(vol)) {
                        if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
@@ -1799,7 +1799,7 @@ static bool load_system_files(ntfs_volume *vol)
                                                es3);
                                goto iput_mirr_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s",
                                        !vol->mftmirr_ino ? es1 : es2, es3);
                } else
@@ -1937,7 +1937,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_vol_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -1974,7 +1974,7 @@ get_ctx_vol_failed:
                                }
                                goto iput_logfile_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2019,7 +2019,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_root_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2042,7 +2042,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                /*
                 * Do not set NVolErrors() because ntfs_remount() might manage
                 * to set the dirty flag in which case all would be well.
@@ -2055,7 +2055,7 @@ get_ctx_vol_failed:
         * If (still) a read-write mount, set the NT4 compatibility flag on
         * newer NTFS version volumes.
         */
-       if (!(sb->s_flags & MS_RDONLY) && (vol->major_ver > 1) &&
+       if (!(sb->s_flags & SB_RDONLY) && (vol->major_ver > 1) &&
                        ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
                static const char *es1 = "Failed to set NT4 compatibility flag";
                static const char *es2 = ".  Run chkdsk.";
@@ -2069,7 +2069,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif
@@ -2087,7 +2087,7 @@ get_ctx_vol_failed:
                        goto iput_root_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif /* NTFS_RW */
@@ -2128,7 +2128,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_quota_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2150,7 +2150,7 @@ get_ctx_vol_failed:
                        goto iput_quota_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
        /*
@@ -2171,7 +2171,7 @@ get_ctx_vol_failed:
                                                es1, es2);
                                goto iput_usnjrnl_err_out;
                        }
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
                } else
                        ntfs_warning(sb, "%s.  Will not be able to remount "
@@ -2194,7 +2194,7 @@ get_ctx_vol_failed:
                        goto iput_usnjrnl_err_out;
                }
                ntfs_error(sb, "%s.  Mounting read-only%s", es1, es2);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                NVolSetErrors(vol);
        }
 #endif /* NTFS_RW */
@@ -2728,7 +2728,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
        lockdep_off();
        ntfs_debug("Entering.");
 #ifndef NTFS_RW
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
 #endif /* ! NTFS_RW */
        /* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
        sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
index 8d779227370ab1d121fdc2fc6f546f7844e95a5b..bebe59feca5873a766cd28becd269cc576cdfc6d 100644 (file)
@@ -140,7 +140,7 @@ static void o2net_rx_until_empty(struct work_struct *work);
 static void o2net_shutdown_sc(struct work_struct *work);
 static void o2net_listen_data_ready(struct sock *sk);
 static void o2net_sc_send_keep_req(struct work_struct *work);
-static void o2net_idle_timer(unsigned long data);
+static void o2net_idle_timer(struct timer_list *t);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
 
@@ -450,8 +450,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
        INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
        INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
-       setup_timer(&sc->sc_idle_timeout, o2net_idle_timer,
-                   (unsigned long)sc);
+       timer_setup(&sc->sc_idle_timeout, o2net_idle_timer, 0);
 
        sclog(sc, "alloced\n");
 
@@ -1517,9 +1516,9 @@ static void o2net_sc_send_keep_req(struct work_struct *work)
 /* socket shutdown does a del_timer_sync against this as it tears down.
  * we can't start this timer until we've got to the point in sc buildup
  * where shutdown is going to be involved */
-static void o2net_idle_timer(unsigned long data)
+static void o2net_idle_timer(struct timer_list *t)
 {
-       struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
+       struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout);
        struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 #ifdef CONFIG_DEBUG_FS
        unsigned long msecs = ktime_to_ms(ktime_get()) -
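
The o2net change above is part of the tree-wide timer conversion: callbacks now receive the struct timer_list pointer itself and recover their container with from_timer() (a container_of() wrapper), instead of casting an unsigned long cookie stashed earlier by setup_timer(). A hedged sketch of the new pattern with illustrative names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct example_conn {
            struct timer_list idle_timer;
            /* ... other per-connection state ... */
    };

    static void example_idle_expired(struct timer_list *t)
    {
            /* from_timer() maps the timer_list back to its container */
            struct example_conn *conn = from_timer(conn, t, idle_timer);

            pr_debug("connection %p idled out\n", conn);
    }

    static void example_arm_idle_timer(struct example_conn *conn)
    {
            timer_setup(&conn->idle_timer, example_idle_expired, 0);
            mod_timer(&conn->idle_timer, jiffies + HZ);
    }
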
index dc455d45a66aed68bd148cf438081ef3f4b4af02..a1d05105547267df1a9f136cf34f2e090fac99b7 100644 (file)
@@ -227,7 +227,7 @@ int ocfs2_should_update_atime(struct inode *inode,
                return 0;
 
        if ((inode->i_flags & S_NOATIME) ||
-           ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
+           ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;
 
        /*
index 040bbb6a6e4b80fd4fa313f7a0b014505343f95d..80efa5699fb0c3db95c3f6490feed61ada1f3f90 100644 (file)
@@ -675,9 +675,9 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
        }
 
        /* We're going to/from readonly mode. */
-       if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+       if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
                /* Disable quota accounting before remounting RO */
-               if (*flags & MS_RDONLY) {
+               if (*flags & SB_RDONLY) {
                        ret = ocfs2_susp_quotas(osb, 0);
                        if (ret < 0)
                                goto out;
@@ -691,8 +691,8 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                        goto unlock_osb;
                }
 
-               if (*flags & MS_RDONLY) {
-                       sb->s_flags |= MS_RDONLY;
+               if (*flags & SB_RDONLY) {
+                       sb->s_flags |= SB_RDONLY;
                        osb->osb_flags |= OCFS2_OSB_SOFT_RO;
                } else {
                        if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
@@ -709,14 +709,14 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                                ret = -EINVAL;
                                goto unlock_osb;
                        }
-                       sb->s_flags &= ~MS_RDONLY;
+                       sb->s_flags &= ~SB_RDONLY;
                        osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
                }
                trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
 unlock_osb:
                spin_unlock(&osb->osb_lock);
                /* Enable quota accounting after remounting RW */
-               if (!ret && !(*flags & MS_RDONLY)) {
+               if (!ret && !(*flags & SB_RDONLY)) {
                        if (sb_any_quota_suspended(sb))
                                ret = ocfs2_susp_quotas(osb, 1);
                        else
@@ -724,7 +724,7 @@ unlock_osb:
                        if (ret < 0) {
                                /* Return back changes... */
                                spin_lock(&osb->osb_lock);
-                               sb->s_flags |= MS_RDONLY;
+                               sb->s_flags |= SB_RDONLY;
                                osb->osb_flags |= OCFS2_OSB_SOFT_RO;
                                spin_unlock(&osb->osb_lock);
                                goto out;
@@ -744,9 +744,9 @@ unlock_osb:
                if (!ocfs2_is_hard_readonly(osb))
                        ocfs2_set_journal_params(osb);
 
-               sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+               sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
                        ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
-                                                       MS_POSIXACL : 0);
+                                                       SB_POSIXACL : 0);
        }
 out:
        return ret;
@@ -1057,10 +1057,10 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic = OCFS2_SUPER_MAGIC;
 
-       sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
-               ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+       sb->s_flags = (sb->s_flags & ~(SB_POSIXACL | SB_NOSEC)) |
+               ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
 
-       /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
+       /* Hard readonly mode only if: bdev_read_only, SB_RDONLY,
         * heartbeat=none */
        if (bdev_read_only(sb->s_bdev)) {
                if (!sb_rdonly(sb)) {
@@ -2057,7 +2057,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
        sb->s_xattr = ocfs2_xattr_handlers;
        sb->s_time_gran = 1;
-       sb->s_flags |= MS_NOATIME;
+       sb->s_flags |= SB_NOATIME;
        /* this is needed to support O_LARGEFILE */
        cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
        bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
@@ -2568,7 +2568,7 @@ static int ocfs2_handle_error(struct super_block *sb)
                        return rv;
 
                pr_crit("OCFS2: File system is now read-only.\n");
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
                ocfs2_set_ro_flag(osb, 0);
        }
 
index 5fdf269ba82e393c1bff315b260ec45b7a1787b2..c5898c59d4118d8dbd276f32bdf65e23eff52919 100644 (file)
@@ -901,7 +901,7 @@ static int ocfs2_xattr_list_entry(struct super_block *sb,
 
        case OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS:
        case OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT:
-               if (!(sb->s_flags & MS_POSIXACL))
+               if (!(sb->s_flags & SB_POSIXACL))
                        return 0;
                break;
 
index 13215f26e321902fde7cd0143763146f93df82ab..2200662a9bf186ae54dfc92573cfe00f589c1858 100644 (file)
@@ -369,7 +369,7 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
 static int openprom_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_NOATIME;
+       *flags |= SB_NOATIME;
        return 0;
 }
 
@@ -386,7 +386,7 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
        struct op_inode_info *oi;
        int ret;
 
-       s->s_flags |= MS_NOATIME;
+       s->s_flags |= SB_NOATIME;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = OPENPROM_SUPER_MAGIC;
index c2d8233b1e826cc99abb615d55c1b590bc89d0b8..480ea059a6802785c78a291c14e7ca01abdcf548 100644 (file)
@@ -155,13 +155,11 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 
 int orangefs_init_acl(struct inode *inode, struct inode *dir)
 {
-       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        struct posix_acl *default_acl, *acl;
        umode_t mode = inode->i_mode;
+       struct iattr iattr;
        int error = 0;
 
-       ClearModeFlag(orangefs_inode);
-
        error = posix_acl_create(dir, &mode, &default_acl, &acl);
        if (error)
                return error;
@@ -180,9 +178,11 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir)
 
        /* If mode of the inode was changed, then do a forcible ->setattr */
        if (mode != inode->i_mode) {
-               SetModeFlag(orangefs_inode);
+               memset(&iattr, 0, sizeof iattr);
                inode->i_mode = mode;
-               orangefs_flush_inode(inode);
+               iattr.ia_mode = mode;
+               iattr.ia_valid |= ATTR_MODE;
+               orangefs_inode_setattr(inode, &iattr);
        }
 
        return error;
index a8cc588d6224f3b48c2e9be7f7881612889ef2bd..e2c2699d8016274dbe2225b14c0ab8828ceae00f 100644 (file)
@@ -386,7 +386,6 @@ static int orangefs_dir_release(struct inode *inode, struct file *file)
 {
        struct orangefs_dir *od = file->private_data;
        struct orangefs_dir_part *part = od->part;
-       orangefs_flush_inode(inode);
        while (part) {
                struct orangefs_dir_part *next = part->next;
                vfree(part);
index e4a8e6a7eb17b6aaa4b0ab5bf34e16b514c0ea82..1668fd645c453609473f9d6ab89b30499300d940 100644 (file)
@@ -383,9 +383,15 @@ out:
                if (type == ORANGEFS_IO_READ) {
                        file_accessed(file);
                } else {
-                       SetMtimeFlag(orangefs_inode);
-                       inode->i_mtime = current_time(inode);
-                       mark_inode_dirty_sync(inode);
+                       file_update_time(file);
+                       /*
+                        * Must invalidate to ensure write loop doesn't
+                        * prevent kernel from reading updated
+                        * attribute.  Size probably changed because of
+                        * the write, and other clients could update
+                        * any other attribute.
+                        */
+                       orangefs_inode->getattr_time = jiffies - 1;
                }
        }
 
@@ -615,8 +621,6 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
                     "orangefs_file_release: called on %pD\n",
                     file);
 
-       orangefs_flush_inode(inode);
-
        /*
         * remove all associated inode pages from the page cache and
         * readahead cache (if any); this forces an expensive refresh of
@@ -666,8 +670,6 @@ static int orangefs_fsync(struct file *file,
                     ret);
 
        op_release(new_op);
-
-       orangefs_flush_inode(file_inode(file));
        return ret;
 }
 
index 28825a5b6d098f5fbfad1741e0476f7601f41a56..fe1d705ad91fac90c8d496a1c78611e42da9d1d1 100644 (file)
@@ -290,6 +290,22 @@ int orangefs_permission(struct inode *inode, int mask)
        return generic_permission(inode, mask);
 }
 
+int orangefs_update_time(struct inode *inode, struct timespec *time, int flags)
+{
+       struct iattr iattr;
+       gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
+           get_khandle_from_ino(inode));
+       generic_update_time(inode, time, flags);
+       memset(&iattr, 0, sizeof iattr);
+        if (flags & S_ATIME)
+               iattr.ia_valid |= ATTR_ATIME;
+       if (flags & S_CTIME)
+               iattr.ia_valid |= ATTR_CTIME;
+       if (flags & S_MTIME)
+               iattr.ia_valid |= ATTR_MTIME;
+       return orangefs_inode_setattr(inode, &iattr);
+}
+
 /* ORANGEFS2 implementation of VFS inode operations for files */
 const struct inode_operations orangefs_file_inode_operations = {
        .get_acl = orangefs_get_acl,
@@ -298,6 +314,7 @@ const struct inode_operations orangefs_file_inode_operations = {
        .getattr = orangefs_getattr,
        .listxattr = orangefs_listxattr,
        .permission = orangefs_permission,
+       .update_time = orangefs_update_time,
 };
 
 static int orangefs_init_iops(struct inode *inode)
index 7e9e5d0ea3bc24a9b8f270497c3319a1f5bd0848..c98bba2dbc94c3547782f7f485bbdd4793d354f4 100644 (file)
@@ -22,7 +22,9 @@ static int orangefs_create(struct inode *dir,
 {
        struct orangefs_inode_s *parent = ORANGEFS_I(dir);
        struct orangefs_kernel_op_s *new_op;
+       struct orangefs_object_kref ref;
        struct inode *inode;
+       struct iattr iattr;
        int ret;
 
        gossip_debug(GOSSIP_NAME_DEBUG, "%s: %pd\n",
@@ -55,8 +57,10 @@ static int orangefs_create(struct inode *dir,
        if (ret < 0)
                goto out;
 
-       inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0,
-                               &new_op->downcall.resp.create.refn);
+       ref = new_op->downcall.resp.create.refn;
+       op_release(new_op);
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0, &ref);
        if (IS_ERR(inode)) {
                gossip_err("%s: Failed to allocate inode for file :%pd:\n",
                           __func__,
@@ -82,12 +86,13 @@ static int orangefs_create(struct inode *dir,
                     __func__,
                     dentry);
 
-       SetMtimeFlag(parent);
        dir->i_mtime = dir->i_ctime = current_time(dir);
+       memset(&iattr, 0, sizeof iattr);
+       iattr.ia_valid |= ATTR_MTIME;
+       orangefs_inode_setattr(dir, &iattr);
        mark_inode_dirty_sync(dir);
        ret = 0;
 out:
-       op_release(new_op);
        gossip_debug(GOSSIP_NAME_DEBUG,
                     "%s: %pd: returning %d\n",
                     __func__,
@@ -221,6 +226,7 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
        struct inode *inode = dentry->d_inode;
        struct orangefs_inode_s *parent = ORANGEFS_I(dir);
        struct orangefs_kernel_op_s *new_op;
+       struct iattr iattr;
        int ret;
 
        gossip_debug(GOSSIP_NAME_DEBUG,
@@ -253,8 +259,10 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
        if (!ret) {
                drop_nlink(inode);
 
-               SetMtimeFlag(parent);
                dir->i_mtime = dir->i_ctime = current_time(dir);
+               memset(&iattr, 0, sizeof iattr);
+               iattr.ia_valid |= ATTR_MTIME;
+               orangefs_inode_setattr(dir, &iattr);
                mark_inode_dirty_sync(dir);
        }
        return ret;
@@ -266,7 +274,9 @@ static int orangefs_symlink(struct inode *dir,
 {
        struct orangefs_inode_s *parent = ORANGEFS_I(dir);
        struct orangefs_kernel_op_s *new_op;
+       struct orangefs_object_kref ref;
        struct inode *inode;
+       struct iattr iattr;
        int mode = 755;
        int ret;
 
@@ -307,8 +317,10 @@ static int orangefs_symlink(struct inode *dir,
                goto out;
        }
 
-       inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0,
-                               &new_op->downcall.resp.sym.refn);
+       ref = new_op->downcall.resp.sym.refn;
+       op_release(new_op);
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0, &ref);
        if (IS_ERR(inode)) {
                gossip_err
                    ("*** Failed to allocate orangefs symlink inode\n");
@@ -331,12 +343,13 @@ static int orangefs_symlink(struct inode *dir,
                     get_khandle_from_ino(inode),
                     dentry);
 
-       SetMtimeFlag(parent);
        dir->i_mtime = dir->i_ctime = current_time(dir);
+       memset(&iattr, 0, sizeof iattr);
+       iattr.ia_valid |= ATTR_MTIME;
+       orangefs_inode_setattr(dir, &iattr);
        mark_inode_dirty_sync(dir);
        ret = 0;
 out:
-       op_release(new_op);
        return ret;
 }
 
@@ -344,7 +357,9 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
 {
        struct orangefs_inode_s *parent = ORANGEFS_I(dir);
        struct orangefs_kernel_op_s *new_op;
+       struct orangefs_object_kref ref;
        struct inode *inode;
+       struct iattr iattr;
        int ret;
 
        new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR);
@@ -373,8 +388,10 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
                goto out;
        }
 
-       inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0,
-                               &new_op->downcall.resp.mkdir.refn);
+       ref = new_op->downcall.resp.mkdir.refn;
+       op_release(new_op);
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0, &ref);
        if (IS_ERR(inode)) {
                gossip_err("*** Failed to allocate orangefs dir inode\n");
                ret = PTR_ERR(inode);
@@ -400,11 +417,12 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
         * NOTE: we have no good way to keep nlink consistent for directories
         * across clients; keep constant at 1.
         */
-       SetMtimeFlag(parent);
        dir->i_mtime = dir->i_ctime = current_time(dir);
+       memset(&iattr, 0, sizeof iattr);
+       iattr.ia_valid |= ATTR_MTIME;
+       orangefs_inode_setattr(dir, &iattr);
        mark_inode_dirty_sync(dir);
 out:
-       op_release(new_op);
        return ret;
 }
 
@@ -470,4 +488,5 @@ const struct inode_operations orangefs_dir_inode_operations = {
        .getattr = orangefs_getattr,
        .listxattr = orangefs_listxattr,
        .permission = orangefs_permission,
+       .update_time = orangefs_update_time,
 };
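
The OrangeFS hunks above replace the deferred dirty-flag scheme (SetMtimeFlag() and friends, flushed later by orangefs_flush_inode()) with an immediate push of the changed attribute to the server: each operation builds a small iattr on the stack and calls orangefs_inode_setattr() on the spot. A hypothetical helper showing just that idiom (example_push_mtime() is not part of the patch; orangefs_inode_setattr() is the filesystem's own helper declared in orangefs-kernel.h):

    #include <linux/fs.h>
    #include <linux/string.h>
    #include "orangefs-kernel.h"            /* orangefs_inode_setattr() */

    static int example_push_mtime(struct inode *dir)
    {
            struct iattr iattr;

            dir->i_mtime = dir->i_ctime = current_time(dir);
            mark_inode_dirty_sync(dir);

            /* tell the server about the new mtime right away */
            memset(&iattr, 0, sizeof iattr);
            iattr.ia_valid |= ATTR_MTIME;
            return orangefs_inode_setattr(dir, &iattr);
    }
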
index b6001bb28f5a94f6d86a17a76b3245525748be9c..c7db56a31b9209076975f6380aad69761cccfde9 100644 (file)
 
 #ifdef __KERNEL__
 #include <linux/types.h>
+#include <linux/kernel.h>
 #else
 #include <stdint.h>
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
 #endif
 
 #define        GOSSIP_NO_DEBUG                 (__u64)0
@@ -88,6 +90,6 @@ static struct __keyword_mask_s s_kmod_keyword_mask_map[] = {
 };
 
 static const int num_kmod_keyword_mask_map = (int)
-       (sizeof(s_kmod_keyword_mask_map) / sizeof(struct __keyword_mask_s));
+       (ARRAY_SIZE(s_kmod_keyword_mask_map));
 
 #endif /* __ORANGEFS_DEBUG_H */
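
orangefs-debug.h is also consumed by userspace tooling, so the hunk above takes ARRAY_SIZE() from <linux/kernel.h> for kernel builds and supplies the conventional fallback definition otherwise, then uses it for the keyword-mask table. A stand-alone userspace illustration of the macro (not taken from the patch):

    #include <stdio.h>

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
            static const int primes[] = { 2, 3, 5, 7, 11 };

            /* the element count is computed from the array type, so it never
             * goes stale when entries are added or removed */
            printf("%zu\n", ARRAY_SIZE(primes));    /* prints 5 */
            return 0;
    }
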
index f44d5eb74fcc84f37fe69606f63b6a50dd171dba..97adf7d100b5fe498f70345d1fcdfc525f1d2309 100644 (file)
@@ -209,37 +209,10 @@ struct orangefs_inode_s {
        struct inode vfs_inode;
        sector_t last_failed_block_index_read;
 
-       /*
-        * State of in-memory attributes not yet flushed to disk associated
-        * with this object
-        */
-       unsigned long pinode_flags;
-
        unsigned long getattr_time;
        u32 getattr_mask;
 };
 
-#define P_ATIME_FLAG 0
-#define P_MTIME_FLAG 1
-#define P_CTIME_FLAG 2
-#define P_MODE_FLAG  3
-
-#define ClearAtimeFlag(pinode) clear_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
-#define SetAtimeFlag(pinode)   set_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
-#define AtimeFlag(pinode)      test_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
-
-#define ClearMtimeFlag(pinode) clear_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
-#define SetMtimeFlag(pinode)   set_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
-#define MtimeFlag(pinode)      test_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
-
-#define ClearCtimeFlag(pinode) clear_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
-#define SetCtimeFlag(pinode)   set_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
-#define CtimeFlag(pinode)      test_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
-
-#define ClearModeFlag(pinode) clear_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
-#define SetModeFlag(pinode)   set_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
-#define ModeFlag(pinode)      test_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
-
 /* per superblock private orangefs info */
 struct orangefs_sb_info_s {
        struct orangefs_khandle root_khandle;
@@ -436,6 +409,8 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
 
 int orangefs_permission(struct inode *inode, int mask);
 
+int orangefs_update_time(struct inode *, struct timespec *, int);
+
 /*
  * defined in xattr.c
  */
@@ -478,8 +453,6 @@ bool __is_daemon_in_service(void);
  */
 __s32 fsid_of_op(struct orangefs_kernel_op_s *op);
 
-int orangefs_flush_inode(struct inode *inode);
-
 ssize_t orangefs_inode_getxattr(struct inode *inode,
                             const char *name,
                             void *buffer,
index f82336496311c532c3150f1e85b1caf59de03abb..97fe93129f38872eb40e7828bf6824d3b348dfae 100644 (file)
@@ -4,6 +4,7 @@
  *
  * See COPYING in top-level directory.
  */
+#include <linux/kernel.h>
 #include "protocol.h"
 #include "orangefs-kernel.h"
 #include "orangefs-dev-proto.h"
@@ -437,89 +438,8 @@ int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr)
 
        op_release(new_op);
 
-       /*
-        * successful setattr should clear the atime, mtime and
-        * ctime flags.
-        */
-       if (ret == 0) {
-               ClearAtimeFlag(orangefs_inode);
-               ClearMtimeFlag(orangefs_inode);
-               ClearCtimeFlag(orangefs_inode);
-               ClearModeFlag(orangefs_inode);
+       if (ret == 0)
                orangefs_inode->getattr_time = jiffies - 1;
-       }
-
-       return ret;
-}
-
-int orangefs_flush_inode(struct inode *inode)
-{
-       /*
-        * If it is a dirty inode, this function gets called.
-        * Gather all the information that needs to be setattr'ed
-        * Right now, this will only be used for mode, atime, mtime
-        * and/or ctime.
-        */
-       struct iattr wbattr;
-       int ret;
-       int mtime_flag;
-       int ctime_flag;
-       int atime_flag;
-       int mode_flag;
-       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
-
-       memset(&wbattr, 0, sizeof(wbattr));
-
-       /*
-        * check inode flags up front, and clear them if they are set.  This
-        * will prevent multiple processes from all trying to flush the same
-        * inode if they call close() simultaneously
-        */
-       mtime_flag = MtimeFlag(orangefs_inode);
-       ClearMtimeFlag(orangefs_inode);
-       ctime_flag = CtimeFlag(orangefs_inode);
-       ClearCtimeFlag(orangefs_inode);
-       atime_flag = AtimeFlag(orangefs_inode);
-       ClearAtimeFlag(orangefs_inode);
-       mode_flag = ModeFlag(orangefs_inode);
-       ClearModeFlag(orangefs_inode);
-
-       /*  -- Lazy atime,mtime and ctime update --
-        * Note: all times are dictated by server in the new scheme
-        * and not by the clients
-        *
-        * Also mode updates are being handled now..
-        */
-
-       if (mtime_flag)
-               wbattr.ia_valid |= ATTR_MTIME;
-       if (ctime_flag)
-               wbattr.ia_valid |= ATTR_CTIME;
-       if (atime_flag)
-               wbattr.ia_valid |= ATTR_ATIME;
-
-       if (mode_flag) {
-               wbattr.ia_mode = inode->i_mode;
-               wbattr.ia_valid |= ATTR_MODE;
-       }
-
-       gossip_debug(GOSSIP_UTILS_DEBUG,
-                    "*********** orangefs_flush_inode: %pU "
-                    "(ia_valid %d)\n",
-                    get_khandle_from_ino(inode),
-                    wbattr.ia_valid);
-       if (wbattr.ia_valid == 0) {
-               gossip_debug(GOSSIP_UTILS_DEBUG,
-                            "orangefs_flush_inode skipping setattr()\n");
-               return 0;
-       }
-
-       gossip_debug(GOSSIP_UTILS_DEBUG,
-                    "orangefs_flush_inode (%pU) writing mode %o\n",
-                    get_khandle_from_ino(inode),
-                    inode->i_mode);
-
-       ret = orangefs_inode_setattr(inode, &wbattr);
 
        return ret;
 }
@@ -606,7 +526,7 @@ int orangefs_normalize_to_errno(__s32 error_code)
        /* Convert ORANGEFS encoded errno values into regular errno values. */
        } else if ((-error_code) & ORANGEFS_ERROR_BIT) {
                i = (-error_code) & ~(ORANGEFS_ERROR_BIT|ORANGEFS_ERROR_CLASS_BITS);
-               if (i < sizeof(PINT_errno_mapping)/sizeof(*PINT_errno_mapping))
+               if (i < ARRAY_SIZE(PINT_errno_mapping))
                        error_code = -PINT_errno_mapping[i];
                else
                        error_code = -EINVAL;
index 47ebd9bfd1a1be6c638261c4167d530cd5cb3b2a..36f1390b5ed7d6d324471ca145c529cda5477555 100644 (file)
@@ -40,7 +40,7 @@ static int orangefs_show_options(struct seq_file *m, struct dentry *root)
 {
        struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(root->d_sb);
 
-       if (root->d_sb->s_flags & MS_POSIXACL)
+       if (root->d_sb->s_flags & SB_POSIXACL)
                seq_puts(m, ",acl");
        if (orangefs_sb->flags & ORANGEFS_OPT_INTR)
                seq_puts(m, ",intr");
@@ -60,7 +60,7 @@ static int parse_mount_options(struct super_block *sb, char *options,
         * Force any potential flags that might be set from the mount
         * to zero, ie, initialize to unset.
         */
-       sb->s_flags &= ~MS_POSIXACL;
+       sb->s_flags &= ~SB_POSIXACL;
        orangefs_sb->flags &= ~ORANGEFS_OPT_INTR;
        orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK;
 
@@ -73,7 +73,7 @@ static int parse_mount_options(struct super_block *sb, char *options,
                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_acl:
-                       sb->s_flags |= MS_POSIXACL;
+                       sb->s_flags |= SB_POSIXACL;
                        break;
                case Opt_intr:
                        orangefs_sb->flags |= ORANGEFS_OPT_INTR;
@@ -99,8 +99,6 @@ static void orangefs_inode_cache_ctor(void *req)
 
        inode_init_once(&orangefs_inode->vfs_inode);
        init_rwsem(&orangefs_inode->xattr_sem);
-
-       orangefs_inode->vfs_inode.i_version = 1;
 }
 
 static struct inode *orangefs_alloc_inode(struct super_block *sb)
@@ -119,7 +117,6 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb)
        orangefs_inode->refn.fs_id = ORANGEFS_FS_ID_NULL;
        orangefs_inode->last_failed_block_index_read = 0;
        memset(orangefs_inode->link_target, 0, sizeof(orangefs_inode->link_target));
-       orangefs_inode->pinode_flags = 0;
 
        gossip_debug(GOSSIP_SUPER_DEBUG,
                     "orangefs_alloc_inode: allocated %p\n",
@@ -299,21 +296,9 @@ void fsid_key_table_finalize(void)
 {
 }
 
-/* Called whenever the VFS dirties the inode in response to atime updates */
-static void orangefs_dirty_inode(struct inode *inode, int flags)
-{
-       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
-
-       gossip_debug(GOSSIP_SUPER_DEBUG,
-                    "orangefs_dirty_inode: %pU\n",
-                    get_khandle_from_ino(inode));
-       SetAtimeFlag(orangefs_inode);
-}
-
 static const struct super_operations orangefs_s_ops = {
        .alloc_inode = orangefs_alloc_inode,
        .destroy_inode = orangefs_destroy_inode,
-       .dirty_inode = orangefs_dirty_inode,
        .drop_inode = generic_delete_inode,
        .statfs = orangefs_statfs,
        .remount_fs = orangefs_remount_fs,
@@ -522,7 +507,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
        ret = orangefs_fill_sb(sb,
              &new_op->downcall.resp.fs_mount, data,
-             flags & MS_SILENT ? 1 : 0);
+             flags & SB_SILENT ? 1 : 0);
 
        if (ret) {
                d = ERR_PTR(ret);
index d856cdf917634cd97abdec56bd5daab45a6f7d9a..db107fe91ab398462f23622ea5bcf6ddd3151458 100644 (file)
@@ -15,4 +15,5 @@ const struct inode_operations orangefs_symlink_inode_operations = {
        .getattr = orangefs_getattr,
        .listxattr = orangefs_listxattr,
        .permission = orangefs_permission,
+       .update_time = orangefs_update_time,
 };
index be03578181d211ac03c18aad58ad469d0d6c2618..288d20f9a55a220d3782f4eaf10e62c491c3fc5c 100644 (file)
@@ -326,7 +326,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
 {
        struct ovl_fs *ofs = sb->s_fs_info;
 
-       if (!(*flags & MS_RDONLY) && ovl_force_readonly(ofs))
+       if (!(*flags & SB_RDONLY) && ovl_force_readonly(ofs))
                return -EROFS;
 
        return 0;
@@ -1190,7 +1190,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        goto out_err;
 
                if (!ofs->workdir)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
 
                sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth;
                sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran;
@@ -1203,7 +1203,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        /* If the upper fs is nonexistent, we mark overlayfs r/o too */
        if (!ofs->upper_mnt)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        else if (ofs->upper_mnt->mnt_sb != ofs->same_sb)
                ofs->same_sb = NULL;
 
@@ -1213,7 +1213,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        goto out_free_oe;
 
                if (!ofs->indexdir)
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
        }
 
        /* Show index=off/on in /proc/mounts for any of the reasons above */
@@ -1227,7 +1227,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_op = &ovl_super_operations;
        sb->s_xattr = ovl_xattr_handlers;
        sb->s_fs_info = ofs;
-       sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
+       sb->s_flags |= SB_POSIXACL | SB_NOREMOTELOCK;
 
        err = -ENOMEM;
        root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
index 349c9d56d4b34a0b307c5ef5b3fdc552ae030b0c..6d98566201ef5c1f152a11c5849e9e02bd55189b 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1018,13 +1018,19 @@ const struct file_operations pipefifo_fops = {
 
 /*
  * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
+ * of pages. Returns 0 on error.
  */
-static inline unsigned int round_pipe_size(unsigned int size)
+unsigned int round_pipe_size(unsigned int size)
 {
        unsigned long nr_pages;
 
+       if (size < pipe_min_size)
+               size = pipe_min_size;
+
        nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (nr_pages == 0)
+               return 0;
+
        return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
 }
 
@@ -1040,6 +1046,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
        long ret = 0;
 
        size = round_pipe_size(arg);
+       if (size == 0)
+               return -EINVAL;
        nr_pages = size >> PAGE_SHIFT;
 
        if (!nr_pages)
@@ -1117,20 +1125,13 @@ out_revert_acct:
 }
 
 /*
- * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
+ * This should work even if CONFIG_PROC_FS isn't set, as proc_dopipe_max_size
  * will return an error.
  */
 int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
                 size_t *lenp, loff_t *ppos)
 {
-       int ret;
-
-       ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
-       if (ret < 0 || !write)
-               return ret;
-
-       pipe_max_size = round_pipe_size(pipe_max_size);
-       return ret;
+       return proc_dopipe_max_size(table, write, buf, lenp, ppos);
 }
 
 /*
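
round_pipe_size() above now clamps the requested size to pipe_min_size, converts bytes to pages, rounds the page count up to a power of two, and reports 0 when the page count collapses to zero, which pipe_set_size() then rejects with -EINVAL. A rough userspace mirror of that arithmetic, assuming 4 KiB pages and hard-coding a stand-in for the pipe_min_size sysctl (all names below are local to this sketch):

    #include <stdio.h>

    #define EX_PAGE_SIZE   4096UL
    #define EX_PAGE_SHIFT  12
    #define EX_PIPE_MIN    EX_PAGE_SIZE     /* stand-in for pipe_min_size */

    static unsigned long ex_roundup_pow_of_two(unsigned long n)
    {
            unsigned long p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    static unsigned long ex_round_pipe_size(unsigned long size)
    {
            unsigned long nr_pages;

            if (size < EX_PIPE_MIN)
                    size = EX_PIPE_MIN;
            nr_pages = (size + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
            if (nr_pages == 0)
                    return 0;
            return ex_roundup_pow_of_two(nr_pages) << EX_PAGE_SHIFT;
    }

    int main(void)
    {
            /* 3 pages round up to 4 pages: prints 16384 */
            printf("%lu\n", ex_round_pipe_size(3 * EX_PAGE_SIZE));
            return 0;
    }
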
index f7456c4e7d0f1810c6c541adf577252c93d85656..ead487e8051087d2d81889673737653567874ee9 100644 (file)
@@ -21,6 +21,7 @@ proc-y        += loadavg.o
 proc-y += meminfo.o
 proc-y += stat.o
 proc-y += uptime.o
+proc-y += util.o
 proc-y += version.o
 proc-y += softirqs.o
 proc-y += namespaces.o
index 6f6fc1672ad1af5f942165de0c3ac42c08f390cc..79375fc115d277f9336e8aa75b0c1846dc093fc9 100644 (file)
@@ -366,6 +366,11 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
                   cpumask_pr_args(&task->cpus_allowed));
 }
 
+static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
+{
+       seq_printf(m, "CoreDumping:\t%d\n", !!mm->core_state);
+}
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
 {
@@ -376,6 +381,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 
        if (mm) {
                task_mem(m, mm);
+               task_core_dumping(m, mm);
                mmput(mm);
        }
        task_sig(m, task);
index 9d357b2ea6cb59af34af699cb46689464aef419e..28fa85276eec6679cd1ad7a0b408975cbea9204f 100644 (file)
@@ -443,8 +443,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
                save_stack_trace_tsk(task, &trace);
 
                for (i = 0; i < trace.nr_entries; i++) {
-                       seq_printf(m, "[<%pK>] %pB\n",
-                                  (void *)entries[i], (void *)entries[i]);
+                       seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
                }
                unlock_trace(task);
        }
@@ -1682,7 +1681,7 @@ const struct inode_operations proc_pid_link_inode_operations = {
 
 /* building an inode */
 
-void task_dump_owner(struct task_struct *task, mode_t mode,
+void task_dump_owner(struct task_struct *task, umode_t mode,
                     kuid_t *ruid, kgid_t *rgid)
 {
        /* Depending on the state of dumpable compute who should own a
index 225f541f7078c937c6a4c25264069700025a0582..dd0f826224274bd20acbe6944dc7fb6a3709329a 100644 (file)
@@ -483,7 +483,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
 
        /* User space would break if executables or devices appear on proc */
        s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
-       s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
+       s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
        s->s_magic = PROC_SUPER_MAGIC;
index a34195e92b206c9f2cd44ee4a627f53efedc3fd8..4a67188c8d74cdc4269c3d60a60b3245cd9837e9 100644 (file)
@@ -100,31 +100,10 @@ static inline struct task_struct *get_proc_task(struct inode *inode)
        return get_pid_task(proc_pid(inode), PIDTYPE_PID);
 }
 
-void task_dump_owner(struct task_struct *task, mode_t mode,
+void task_dump_owner(struct task_struct *task, umode_t mode,
                     kuid_t *ruid, kgid_t *rgid);
 
-static inline unsigned name_to_int(const struct qstr *qstr)
-{
-       const char *name = qstr->name;
-       int len = qstr->len;
-       unsigned n = 0;
-
-       if (len > 1 && *name == '0')
-               goto out;
-       while (len-- > 0) {
-               unsigned c = *name++ - '0';
-               if (c > 9)
-                       goto out;
-               if (n >= (~0U-9)/10)
-                       goto out;
-               n *= 10;
-               n += c;
-       }
-       return n;
-out:
-       return ~0U;
-}
-
+unsigned name_to_int(const struct qstr *qstr);
 /*
  * Offset of the first process in the /proc root directory..
  */
index 9bc5c58c00ee7a97964482ccd0ad78fd73a4406c..a000d7547479e8849acbea6dea406dc5b2bb7c46 100644 (file)
@@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v)
                LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
                LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
                nr_running(), nr_threads,
-               task_active_pid_ns(current)->last_pid);
+               idr_get_cursor(&task_active_pid_ns(current)->idr));
        return 0;
 }
 
index 4e42aba97f2e3ed694aa5df146ecbf949c0b3f68..ede8e64974be240368d11ab47f3227f72a016e23 100644 (file)
@@ -91,7 +91,7 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
 {
        struct pid_namespace *ns;
 
-       if (flags & MS_KERNMOUNT) {
+       if (flags & SB_KERNMOUNT) {
                ns = data;
                data = NULL;
        } else {
diff --git a/fs/proc/util.c b/fs/proc/util.c
new file mode 100644 (file)
index 0000000..b161cfa
--- /dev/null
@@ -0,0 +1,23 @@
+#include <linux/dcache.h>
+
+unsigned name_to_int(const struct qstr *qstr)
+{
+       const char *name = qstr->name;
+       int len = qstr->len;
+       unsigned n = 0;
+
+       if (len > 1 && *name == '0')
+               goto out;
+       do {
+               unsigned c = *name++ - '0';
+               if (c > 9)
+                       goto out;
+               if (n >= (~0U-9)/10)
+                       goto out;
+               n *= 10;
+               n += c;
+       } while (--len > 0);
+       return n;
+out:
+       return ~0U;
+}
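
The new fs/proc/util.c gives /proc a single out-of-line copy of name_to_int(), which parses a process-directory name as a decimal number while rejecting leading zeros, non-digit characters and values that would overflow, returning ~0U for anything invalid. A userspace adaptation for illustration only (the in-kernel version takes a struct qstr rather than a pointer and length):

    #include <stdio.h>

    static unsigned ex_name_to_int(const char *name, int len)
    {
            unsigned n = 0;

            if (len > 1 && *name == '0')
                    goto out;                       /* no leading zeros */
            do {
                    unsigned c = *name++ - '0';

                    if (c > 9)
                            goto out;               /* not a digit */
                    if (n >= (~0U - 9) / 10)
                            goto out;               /* would overflow */
                    n *= 10;
                    n += c;
            } while (--len > 0);
            return n;
    out:
            return ~0U;
    }

    int main(void)
    {
            printf("%u\n", ex_name_to_int("42", 2));    /* 42 */
            printf("%u\n", ex_name_to_int("042", 3));   /* 4294967295 (~0U): rejected */
            return 0;
    }
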
index 7b635d17321377e4868554a6ad338a1bd413b3cc..b786840facd96e8dbb3cb0ce76125d915bc0c916 100644 (file)
@@ -45,10 +45,10 @@ struct proc_fs_info {
 static int show_sb_opts(struct seq_file *m, struct super_block *sb)
 {
        static const struct proc_fs_info fs_info[] = {
-               { MS_SYNCHRONOUS, ",sync" },
-               { MS_DIRSYNC, ",dirsync" },
-               { MS_MANDLOCK, ",mand" },
-               { MS_LAZYTIME, ",lazytime" },
+               { SB_SYNCHRONOUS, ",sync" },
+               { SB_DIRSYNC, ",dirsync" },
+               { SB_MANDLOCK, ",mand" },
+               { SB_LAZYTIME, ",lazytime" },
                { 0, NULL }
        };
        const struct proc_fs_info *fs_infop;
index 423159abd50182812656c6f73e421da272187670..691032107f8c78776668a93ca4c546dc2a46282a 100644 (file)
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
 
 static int pstore_new_entry;
 
-static void pstore_timefunc(unsigned long);
+static void pstore_timefunc(struct timer_list *);
 static DEFINE_TIMER(pstore_timer, pstore_timefunc);
 
 static void pstore_dowork(struct work_struct *);
@@ -890,7 +890,7 @@ static void pstore_dowork(struct work_struct *work)
        pstore_get_records(1);
 }
 
-static void pstore_timefunc(unsigned long dummy)
+static void pstore_timefunc(struct timer_list *unused)
 {
        if (pstore_new_entry) {
                pstore_new_entry = 0;
index 3a67cfb142d886c11558409c6711260ecf9dab70..3d46fe302fcb15372c40461a64107e02c29ee97d 100644 (file)
@@ -47,7 +47,7 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data)
        sync_filesystem(sb);
        qs = qnx4_sb(sb);
        qs->Version = QNX4_VERSION;
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -199,7 +199,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
 
        s->s_op = &qnx4_sops;
        s->s_magic = QNX4_SUPER_MAGIC;
-       s->s_flags |= MS_RDONLY;        /* Yup, read-only yet */
+       s->s_flags |= SB_RDONLY;        /* Yup, read-only yet */
 
        /* Check the superblock signature. Since the qnx4 code is
           dangerous, we should leave as quickly as possible
index 1192422a1c5628e5782961252e2a1bdab58237c7..4aeb26bcb4d029695226b15da569f8610b8f1585 100644 (file)
@@ -56,7 +56,7 @@ static int qnx6_show_options(struct seq_file *seq, struct dentry *root)
 static int qnx6_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -427,7 +427,7 @@ mmi_success:
        }
        s->s_op = &qnx6_sops;
        s->s_magic = QNX6_SUPER_MAGIC;
-       s->s_flags |= MS_RDONLY;        /* Yup, read-only yet */
+       s->s_flags |= SB_RDONLY;        /* Yup, read-only yet */
 
        /* ease the later tree level calculations */
        sbi = QNX6_SB(s);
index 39f1b0b0c76fbb24cec8b2388cbcfc11dfb0ae4b..020c597ef9b6e66a74f786d70302238d32dc729a 100644 (file)
@@ -941,12 +941,13 @@ static int dqinit_needed(struct inode *inode, int type)
 }
 
 /* This routine is guarded by s_umount semaphore */
-static void add_dquot_ref(struct super_block *sb, int type)
+static int add_dquot_ref(struct super_block *sb, int type)
 {
        struct inode *inode, *old_inode = NULL;
 #ifdef CONFIG_QUOTA_DEBUG
        int reserved = 0;
 #endif
+       int err = 0;
 
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
@@ -966,7 +967,11 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        reserved = 1;
 #endif
                iput(old_inode);
-               __dquot_initialize(inode, type);
+               err = __dquot_initialize(inode, type);
+               if (err) {
+                       iput(inode);
+                       goto out;
+               }
 
                /*
                 * We hold a reference to 'inode' so it couldn't have been
@@ -981,7 +986,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
        }
        spin_unlock(&sb->s_inode_list_lock);
        iput(old_inode);
-
+out:
 #ifdef CONFIG_QUOTA_DEBUG
        if (reserved) {
                quota_error(sb, "Writes happened before quota was turned on "
@@ -989,6 +994,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
                        "Please run quotacheck(8)");
        }
 #endif
+       return err;
 }
 
 /*
@@ -2379,10 +2385,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        dqopt->flags |= dquot_state_flag(flags, type);
        spin_unlock(&dq_state_lock);
 
-       add_dquot_ref(sb, type);
-
-       return 0;
+       error = add_dquot_ref(sb, type);
+       if (error)
+               dquot_disable(sb, type, flags);
 
+       return error;
 out_file_init:
        dqopt->files[type] = NULL;
        iput(inode);
@@ -2985,7 +2992,8 @@ static int __init dquot_init(void)
        pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
                " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
 
-       register_shrinker(&dqcache_shrinker);
+       if (register_shrinker(&dqcache_shrinker))
+               panic("Cannot register dquot shrinker");
 
        return 0;
 }
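
Two error paths are tightened in the dquot hunks above: add_dquot_ref() now propagates a failure from __dquot_initialize() so vfs_load_quota_inode() can undo the enablement with dquot_disable(), and the return value of register_shrinker(), which allocates bookkeeping memory and can therefore fail, is finally checked at init time. A minimal sketch of a checked shrinker registration, using illustrative names only:

    #include <linux/shrinker.h>

    static unsigned long example_count(struct shrinker *s,
                                       struct shrink_control *sc)
    {
            return 0;                       /* nothing reclaimable in this sketch */
    }

    static unsigned long example_scan(struct shrinker *s,
                                      struct shrink_control *sc)
    {
            return SHRINK_STOP;
    }

    static struct shrinker example_shrinker = {
            .count_objects = example_count,
            .scan_objects  = example_scan,
            .seeks         = DEFAULT_SEEKS,
    };

    static int example_cache_init(void)
    {
            /* register_shrinker() can fail, so surface the error to the caller
             * instead of ignoring it (the patch chooses to panic for dquot) */
            return register_shrinker(&example_shrinker);
    }
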
index 11a48affa882415376e279aabe0e0df7b81c49fd..b13fc024d2eed8b3201ae1c6dda226a0031d1e8d 100644 (file)
@@ -2106,7 +2106,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                        journal_end(th);
                        goto out_inserted_sd;
                }
-       } else if (inode->i_sb->s_flags & MS_POSIXACL) {
+       } else if (inode->i_sb->s_flags & SB_POSIXACL) {
                reiserfs_warning(inode->i_sb, "jdm-13090",
                                 "ACLs aren't enabled in the fs, "
                                 "but vfs thinks they are!");
index 69ff280bdfe889a77afac56a24ed6d51ad668aad..70057359fbaf3b7d98c8109dd0bcba1fe112b5cb 100644 (file)
@@ -1960,7 +1960,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
        /*
         * Cancel flushing of old commits. Note that neither of these works
         * will be requeued because superblock is being shutdown and doesn't
-        * have MS_ACTIVE set.
+        * have SB_ACTIVE set.
         */
        reiserfs_cancel_old_flush(sb);
        /* wait for all commits to finish */
@@ -4302,7 +4302,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
                 * Avoid queueing work when sb is being shut down. Transaction
                 * will be flushed on journal shutdown.
                 */
-               if (sb->s_flags & MS_ACTIVE)
+               if (sb->s_flags & SB_ACTIVE)
                        queue_delayed_work(REISERFS_SB(sb)->commit_wq,
                                           &journal->j_work, HZ / 10);
        }
@@ -4393,7 +4393,7 @@ void reiserfs_abort_journal(struct super_block *sb, int errno)
        if (!journal->j_errno)
                journal->j_errno = errno;
 
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        set_bit(J_ABORTED, &journal->j_state);
 
 #ifdef CONFIG_REISERFS_CHECK
index 64f49cafbc5bff7b7c34786e56851408f4f20b55..7e288d97adcbb7504f2c3c2953ca24debd770b01 100644 (file)
@@ -390,7 +390,7 @@ void __reiserfs_error(struct super_block *sb, const char *id,
                return;
 
        reiserfs_info(sb, "Remounting filesystem read-only\n");
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        reiserfs_abort_journal(sb, -EIO);
 }
 
@@ -409,7 +409,7 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
        printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
               error_buf);
 
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        reiserfs_abort_journal(sb, errno);
 }
 
index 5464ec517702f1bb18e0d3323ef57564ef271b53..1fc934d244592e2df6ee902e8606f39cd313055d 100644 (file)
@@ -121,7 +121,7 @@ void reiserfs_schedule_old_flush(struct super_block *s)
         * Avoid scheduling flush when sb is being shut down. It can race
         * with journal shutdown and free still queued delayed work.
         */
-       if (sb_rdonly(s) || !(s->s_flags & MS_ACTIVE))
+       if (sb_rdonly(s) || !(s->s_flags & SB_ACTIVE))
                return;
 
        spin_lock(&sbi->old_work_lock);
@@ -252,11 +252,11 @@ static int finish_unfinished(struct super_block *s)
 
 #ifdef CONFIG_QUOTA
        /* Needed for iput() to work correctly and not trash data */
-       if (s->s_flags & MS_ACTIVE) {
+       if (s->s_flags & SB_ACTIVE) {
                ms_active_set = 0;
        } else {
                ms_active_set = 1;
-               s->s_flags |= MS_ACTIVE;
+               s->s_flags |= SB_ACTIVE;
        }
        /* Turn on quotas so that they are updated correctly */
        for (i = 0; i < REISERFS_MAXQUOTAS; i++) {
@@ -411,7 +411,7 @@ static int finish_unfinished(struct super_block *s)
        reiserfs_write_lock(s);
        if (ms_active_set)
                /* Restore the flag back */
-               s->s_flags &= ~MS_ACTIVE;
+               s->s_flags &= ~SB_ACTIVE;
 #endif
        pathrelse(&path);
        if (done)
@@ -1521,7 +1521,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                        goto out_err_unlock;
        }
 
-       if (*mount_flags & MS_RDONLY) {
+       if (*mount_flags & SB_RDONLY) {
                reiserfs_write_unlock(s);
                reiserfs_xattr_init(s, *mount_flags);
                /* remount read-only */
@@ -1567,7 +1567,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
 
                /* now it is safe to call journal_begin */
-               s->s_flags &= ~MS_RDONLY;
+               s->s_flags &= ~SB_RDONLY;
                err = journal_begin(&th, s, 10);
                if (err)
                        goto out_err_unlock;
@@ -1575,7 +1575,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                /* Mount a partition which is read-only, read-write */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
                REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
-               s->s_flags &= ~MS_RDONLY;
+               s->s_flags &= ~SB_RDONLY;
                set_sb_umount_state(rs, REISERFS_ERROR_FS);
                if (!old_format_only(s))
                        set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
@@ -1590,7 +1590,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                goto out_err_unlock;
 
        reiserfs_write_unlock(s);
-       if (!(*mount_flags & MS_RDONLY)) {
+       if (!(*mount_flags & SB_RDONLY)) {
                dquot_resume(s, -1);
                reiserfs_write_lock(s);
                finish_unfinished(s);
@@ -2055,7 +2055,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) {
                SWARN(silent, s, "clm-7000",
                      "Detected readonly device, marking FS readonly");
-               s->s_flags |= MS_RDONLY;
+               s->s_flags |= SB_RDONLY;
        }
        args.objectid = REISERFS_ROOT_OBJECTID;
        args.dirid = REISERFS_ROOT_PARENT_OBJECTID;
@@ -2591,7 +2591,6 @@ out:
                return err;
        if (inode->i_size < off + len - towrite)
                i_size_write(inode, off + len - towrite);
-       inode->i_version++;
        inode->i_mtime = inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);
        return len - towrite;
index 46492fb37a4c6a44194f171fdab768acbbd17f06..5dbf5324bdda53377e57d38661ec3a835256c85f 100644 (file)
@@ -959,7 +959,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
 
 /*
  * We need to take a copy of the mount flags since things like
- * MS_RDONLY don't get set until *after* we're called.
+ * SB_RDONLY don't get set until *after* we're called.
  * mount_flags != mount_options
  */
 int reiserfs_xattr_init(struct super_block *s, int mount_flags)
@@ -971,7 +971,7 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
        if (err)
                goto error;
 
-       if (d_really_is_negative(privroot) && !(mount_flags & MS_RDONLY)) {
+       if (d_really_is_negative(privroot) && !(mount_flags & SB_RDONLY)) {
                inode_lock(d_inode(s->s_root));
                err = create_privroot(REISERFS_SB(s)->priv_root);
                inode_unlock(d_inode(s->s_root));
@@ -999,11 +999,11 @@ error:
                clear_bit(REISERFS_POSIXACL, &REISERFS_SB(s)->s_mount_opt);
        }
 
-       /* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
+       /* The super_block SB_POSIXACL must mirror the (no)acl mount option. */
        if (reiserfs_posixacl(s))
-               s->s_flags |= MS_POSIXACL;
+               s->s_flags |= SB_POSIXACL;
        else
-               s->s_flags &= ~MS_POSIXACL;
+               s->s_flags &= ~SB_POSIXACL;
 
        return err;
 }
index 0186fe6d39f3b4d2e77497d4d34a7691204ae9fa..8f06fd1f3d692426a38011a77e5f6f8be0e1ad8f 100644 (file)
@@ -451,7 +451,7 @@ static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int romfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
@@ -502,7 +502,7 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_maxbytes = 0xFFFFFFFF;
        sb->s_magic = ROMFS_MAGIC;
-       sb->s_flags |= MS_RDONLY | MS_NOATIME;
+       sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_op = &romfs_super_ops;
 
 #ifdef CONFIG_ROMFS_ON_MTD
index cf01e15a7b16dff288e2479014d20e0d787096d5..8a73b97217c8a5fe24f0e30047354fe058644018 100644 (file)
@@ -195,7 +195,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
                (u64) le64_to_cpu(sblk->id_table_start));
 
        sb->s_maxbytes = MAX_LFS_FILESIZE;
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        sb->s_op = &squashfs_super_ops;
 
        err = -ENOMEM;
@@ -373,7 +373,7 @@ static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int squashfs_remount(struct super_block *sb, int *flags, char *data)
 {
        sync_filesystem(sb);
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
index b072a8bab71a1464d3d06d9017a2a33d441e0ef9..5b2a24f0f263b65ad62de722ffe06d28b7bb8102 100644 (file)
@@ -35,11 +35,11 @@ static int flags_by_mnt(int mnt_flags)
 static int flags_by_sb(int s_flags)
 {
        int flags = 0;
-       if (s_flags & MS_SYNCHRONOUS)
+       if (s_flags & SB_SYNCHRONOUS)
                flags |= ST_SYNCHRONOUS;
-       if (s_flags & MS_MANDLOCK)
+       if (s_flags & SB_MANDLOCK)
                flags |= ST_MANDLOCK;
-       if (s_flags & MS_RDONLY)
+       if (s_flags & SB_RDONLY)
                flags |= ST_RDONLY;
        return flags;
 }
index 20b8f82e115b647b9d6f29c0877a1a2e3d6fc44c..fb49510c5dcfa9710d9118404cf84094390c72e4 100644 (file)
@@ -30,7 +30,7 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
        void *ns;
        bool new_sb;
 
-       if (!(flags & MS_KERNMOUNT)) {
+       if (!(flags & SB_KERNMOUNT)) {
                if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
                        return ERR_PTR(-EPERM);
        }
index 3c47b7d5d4cf8e8d38eae5a7ab0484d3132e30e3..bec9f79adb25a207dca39feeb924b7e946f664db 100644 (file)
@@ -63,7 +63,7 @@ static int sysv_remount(struct super_block *sb, int *flags, char *data)
 
        sync_filesystem(sb);
        if (sbi->s_forced_ro)
-               *flags |= MS_RDONLY;
+               *flags |= SB_RDONLY;
        return 0;
 }
 
index 0d56e486b39225c597fa7f2dd86f8ec54d5e25d7..89765ddfb738c075b44003e0e1dfeea414d24c7f 100644 (file)
@@ -333,7 +333,7 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
        /* set up enough so that it can read an inode */
        sb->s_op = &sysv_sops;
        if (sbi->s_forced_ro)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        if (sbi->s_truncate)
                sb->s_d_op = &sysv_dentry_operations;
        root_inode = sysv_iget(sb, SYSV_ROOT_INO);
index a02aa59d1e245124dcead693b15cd128fa2b1f4e..dfe85069586ebe001eb9bba841405bb4a43e94bf 100644 (file)
@@ -1406,7 +1406,7 @@ int ubifs_update_time(struct inode *inode, struct timespec *time,
        if (flags & S_MTIME)
                inode->i_mtime = *time;
 
-       if (!(inode->i_sb->s_flags & MS_LAZYTIME))
+       if (!(inode->i_sb->s_flags & SB_LAZYTIME))
                iflags |= I_DIRTY_SYNC;
 
        release = ui->dirty;
index 3be28900bf3750364f79ac36803eb0eae1066f80..fe77e9625e84791d007821ef231e633a58539f64 100644 (file)
@@ -84,7 +84,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
        if (!c->ro_error) {
                c->ro_error = 1;
                c->no_chk_data_crc = 0;
-               c->vfs_sb->s_flags |= MS_RDONLY;
+               c->vfs_sb->s_flags |= SB_RDONLY;
                ubifs_warn(c, "switched to read-only mode, error %d", err);
                dump_stack();
        }
index 7503e7cdf8702a61ce91576316bfce10bd63e113..0beb285b143da6cfe9cc238ede64313f8f75a604 100644 (file)
@@ -968,7 +968,7 @@ static int parse_standard_option(const char *option)
 
        pr_notice("UBIFS: parse %s\n", option);
        if (!strcmp(option, "sync"))
-               return MS_SYNCHRONOUS;
+               return SB_SYNCHRONOUS;
        return 0;
 }
 
@@ -1160,8 +1160,8 @@ static int mount_ubifs(struct ubifs_info *c)
        size_t sz;
 
        c->ro_mount = !!sb_rdonly(c->vfs_sb);
-       /* Suppress error messages while probing if MS_SILENT is set */
-       c->probing = !!(c->vfs_sb->s_flags & MS_SILENT);
+       /* Suppress error messages while probing if SB_SILENT is set */
+       c->probing = !!(c->vfs_sb->s_flags & SB_SILENT);
 
        err = init_constants_early(c);
        if (err)
@@ -1852,7 +1852,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
                return err;
        }
 
-       if (c->ro_mount && !(*flags & MS_RDONLY)) {
+       if (c->ro_mount && !(*flags & SB_RDONLY)) {
                if (c->ro_error) {
                        ubifs_msg(c, "cannot re-mount R/W due to prior errors");
                        return -EROFS;
@@ -1864,7 +1864,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
                err = ubifs_remount_rw(c);
                if (err)
                        return err;
-       } else if (!c->ro_mount && (*flags & MS_RDONLY)) {
+       } else if (!c->ro_mount && (*flags & SB_RDONLY)) {
                if (c->ro_error) {
                        ubifs_msg(c, "cannot re-mount R/O due to prior errors");
                        return -EROFS;
@@ -2117,7 +2117,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
         */
        ubi = open_ubi(name, UBI_READONLY);
        if (IS_ERR(ubi)) {
-               if (!(flags & MS_SILENT))
+               if (!(flags & SB_SILENT))
                        pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
                               current->pid, name, (int)PTR_ERR(ubi));
                return ERR_CAST(ubi);
@@ -2143,18 +2143,18 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
                kfree(c);
                /* A new mount point for already mounted UBIFS */
                dbg_gen("this ubi volume is already mounted");
-               if (!!(flags & MS_RDONLY) != c1->ro_mount) {
+               if (!!(flags & SB_RDONLY) != c1->ro_mount) {
                        err = -EBUSY;
                        goto out_deact;
                }
        } else {
-               err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+               err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
                if (err)
                        goto out_deact;
                /* We do not support atime */
-               sb->s_flags |= MS_ACTIVE;
+               sb->s_flags |= SB_ACTIVE;
 #ifndef CONFIG_UBIFS_ATIME_SUPPORT
-               sb->s_flags |= MS_NOATIME;
+               sb->s_flags |= SB_NOATIME;
 #else
                ubifs_msg(c, "full atime support is enabled.");
 #endif
index 63c7468147eb9b573db8270f55e8840febe99ffc..5ee7af879cc41ab9242e110fbbc7fa8a274d56f1 100644 (file)
@@ -1201,7 +1201,7 @@ struct ubifs_debug_info;
  * @need_recovery: %1 if the file-system needs recovery
  * @replaying: %1 during journal replay
  * @mounting: %1 while mounting
- * @probing: %1 while attempting to mount if MS_SILENT mount flag is set
+ * @probing: %1 while attempting to mount if SB_SILENT mount flag is set
  * @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
  * @replay_list: temporary list used during journal replay
  * @replay_buds: list of buds to replay
@@ -1850,7 +1850,7 @@ __printf(2, 3)
 void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...);
 /*
  * A conditional variant of 'ubifs_err()' which doesn't output anything
- * if probing (ie. MS_SILENT set).
+ * if probing (ie. SB_SILENT set).
  */
 #define ubifs_errc(c, fmt, ...)                                                \
 do {                                                                   \
index f80e0a0f24d3374d02b4b7fa50df7ed2372a4ebf..f73239a9a97daa4a9046252323ef888b27691589 100644 (file)
@@ -650,7 +650,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        sync_filesystem(sb);
        if (lvidiu) {
                int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
-               if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+               if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY))
                        return -EACCES;
        }
 
@@ -673,10 +673,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        sbi->s_dmode = uopt.dmode;
        write_unlock(&sbi->s_cred_lock);
 
-       if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                goto out_unlock;
 
-       if (*flags & MS_RDONLY)
+       if (*flags & SB_RDONLY)
                udf_close_lvid(sb);
        else
                udf_open_lvid(sb);
index b5cd79065ef9a84d72aa427252d81a0edd0abb2d..e727ee07dbe4ac1a824d8626f917d4a561e3eb0a 100644 (file)
@@ -115,7 +115,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -205,7 +205,7 @@ do_more:
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
 
        if (overflow) {
@@ -567,7 +567,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
        
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -688,7 +688,7 @@ cg_found:
 succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
index 916b4a4289334f277d481020ef51c8c7f17a11b7..e1ef0f0a135352992ecff800ca94044673ad0ad2 100644 (file)
@@ -112,7 +112,7 @@ void ufs_free_inode (struct inode * inode)
 
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        
        ufs_mark_sb_dirty(sb);
@@ -146,14 +146,14 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
-               if (sb->s_flags & MS_SYNCHRONOUS)
+               if (sb->s_flags & SB_SYNCHRONOUS)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
 
        fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
        ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
 
        UFSD("EXIT\n");
@@ -284,7 +284,7 @@ cg_found:
        }
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);
 
@@ -330,7 +330,7 @@ cg_found:
                ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, ts.tv_nsec);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
-               if (sb->s_flags & MS_SYNCHRONOUS)
+               if (sb->s_flags & SB_SYNCHRONOUS)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
index 6440003f8ddc62ea689512f4dc25525334d2a42a..4d497e9c68830a1d7a229841c18db5e11903c642 100644 (file)
@@ -282,7 +282,7 @@ void ufs_error (struct super_block * sb, const char * function,
                usb1->fs_clean = UFS_FSBAD;
                ubh_mark_buffer_dirty(USPI_UBH(uspi));
                ufs_mark_sb_dirty(sb);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
        va_start(args, fmt);
        vaf.fmt = fmt;
@@ -320,7 +320,7 @@ void ufs_panic (struct super_block * sb, const char * function,
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
-       sb->s_flags |= MS_RDONLY;
+       sb->s_flags |= SB_RDONLY;
        pr_crit("panic (device %s): %s: %pV\n",
                sb->s_id, function, &vaf);
        va_end(args);
@@ -905,7 +905,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=old is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -921,7 +921,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=nextstep is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -937,7 +937,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=nextstep-cd is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -953,7 +953,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=openstep is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        
@@ -968,7 +968,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                if (!sb_rdonly(sb)) {
                        if (!silent)
                                pr_info("ufstype=hp is supported read-only\n");
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                }
                break;
        default:
@@ -1125,21 +1125,21 @@ magic_found:
                        break;
                case UFS_FSACTIVE:
                        pr_err("%s(): fs is active\n", __func__);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                case UFS_FSBAD:
                        pr_err("%s(): fs is bad\n", __func__);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                default:
                        pr_err("%s(): can't grok fs_clean 0x%x\n",
                               __func__, usb1->fs_clean);
-                       sb->s_flags |= MS_RDONLY;
+                       sb->s_flags |= SB_RDONLY;
                        break;
                }
        } else {
                pr_err("%s(): fs needs fsck\n", __func__);
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        }
 
        /*
@@ -1328,7 +1328,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                return -EINVAL;
        }
 
-       if ((bool)(*mount_flags & MS_RDONLY) == sb_rdonly(sb)) {
+       if ((bool)(*mount_flags & SB_RDONLY) == sb_rdonly(sb)) {
                UFS_SB(sb)->s_mount_opt = new_mount_opt;
                mutex_unlock(&UFS_SB(sb)->s_lock);
                return 0;
@@ -1337,7 +1337,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
        /*
         * fs was mounted as rw, remounting ro
         */
-       if (*mount_flags & MS_RDONLY) {
+       if (*mount_flags & SB_RDONLY) {
                ufs_put_super_internal(sb);
                usb1->fs_time = cpu_to_fs32(sb, get_seconds());
                if ((flags & UFS_ST_MASK) == UFS_ST_SUN
@@ -1346,7 +1346,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                        ufs_set_fs_state(sb, usb1, usb3,
                                UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
                ubh_mark_buffer_dirty (USPI_UBH(uspi));
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        } else {
        /*
         * fs was mounted as ro, remounting rw
@@ -1370,7 +1370,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return -EPERM;
                }
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 #endif
        }
        UFS_SB(sb)->s_mount_opt = new_mount_opt;
index 19e546a412510205a615ac414d1dc1517a2786d6..89bf16b4d9377293fa842c48f2b9b637f83c62c2 100644 (file)
@@ -850,9 +850,9 @@ static void
 xfs_iext_free_last_leaf(
        struct xfs_ifork        *ifp)
 {
-       ifp->if_u1.if_root = NULL;
        ifp->if_height--;
        kmem_free(ifp->if_u1.if_root);
+       ifp->if_u1.if_root = NULL;
 }
 
 void
index 1c90ec41e9dfa9cc8a380635ebf99d7380d9fe03..c79a1616b79d7530f50746aecd26800d95d4fd48 100644 (file)
@@ -42,11 +42,6 @@ STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
 STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
 STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
 
-static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev)
-{
-       return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
-}
-
 /*
  * Copy inode type and data and attr format specific information from the
  * on-disk inode to the in-core inode and fork structures.  For fifos, devices,
@@ -792,7 +787,8 @@ xfs_iflush_fork(
        case XFS_DINODE_FMT_DEV:
                if (iip->ili_fields & XFS_ILOG_DEV) {
                        ASSERT(whichfork == XFS_DATA_FORK);
-                       xfs_dinode_put_rdev(dip, sysv_encode_dev(VFS_I(ip)->i_rdev));
+                       xfs_dinode_put_rdev(dip,
+                                       linux_to_xfs_dev_t(VFS_I(ip)->i_rdev));
                }
                break;
 
index 6282bfc1afa9387880826c5742d328a082de3919..99562ec0de56113e85aa4b60652239af671a0bd5 100644 (file)
@@ -204,6 +204,16 @@ static inline kgid_t xfs_gid_to_kgid(uint32_t gid)
        return make_kgid(&init_user_ns, gid);
 }
 
+static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev)
+{
+       return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
+}
+
+static inline xfs_dev_t linux_to_xfs_dev_t(dev_t dev)
+{
+       return sysv_encode_dev(dev);
+}
+
 /*
  * Various platform dependent calls that don't fit anywhere else
  */
index 38d4227895aef844192e67198df39be01f29c334..a503af96d780ecf9fd15f6c6d579bc9822abac6f 100644 (file)
@@ -781,17 +781,17 @@ xfs_log_mount_finish(
         * something to an unlinked inode, the irele won't cause
         * premature truncation and freeing of the inode, which results
         * in log recovery failure.  We have to evict the unreferenced
-        * lru inodes after clearing MS_ACTIVE because we don't
+        * lru inodes after clearing SB_ACTIVE because we don't
         * otherwise clean up the lru if there's a subsequent failure in
         * xfs_mountfs, which leads to us leaking the inodes if nothing
         * else (e.g. quotacheck) references the inodes before the
         * mount failure occurs.
         */
-       mp->m_super->s_flags |= MS_ACTIVE;
+       mp->m_super->s_flags |= SB_ACTIVE;
        error = xlog_recover_finish(mp->m_log);
        if (!error)
                xfs_log_work_queue(mp);
-       mp->m_super->s_flags &= ~MS_ACTIVE;
+       mp->m_super->s_flags &= ~SB_ACTIVE;
        evict_inodes(mp->m_super);
 
        /*
index f663022353c0d98b681e51fe8578096d0fbf57bf..5122d3021117f00e20d6dd1e195c28666cc71076 100644 (file)
@@ -212,9 +212,9 @@ xfs_parseargs(
         */
        if (sb_rdonly(sb))
                mp->m_flags |= XFS_MOUNT_RDONLY;
-       if (sb->s_flags & MS_DIRSYNC)
+       if (sb->s_flags & SB_DIRSYNC)
                mp->m_flags |= XFS_MOUNT_DIRSYNC;
-       if (sb->s_flags & MS_SYNCHRONOUS)
+       if (sb->s_flags & SB_SYNCHRONOUS)
                mp->m_flags |= XFS_MOUNT_WSYNC;
 
        /*
@@ -1312,7 +1312,7 @@ xfs_fs_remount(
        }
 
        /* ro -> rw */
-       if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+       if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
                if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
                        xfs_warn(mp,
                "ro->rw transition prohibited on norecovery mount");
@@ -1368,7 +1368,7 @@ xfs_fs_remount(
        }
 
        /* rw -> ro */
-       if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+       if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
                /* Free the per-AG metadata reservation pool. */
                error = xfs_fs_unreserve_ag_blocks(mp);
                if (error) {
index 5f2f32408011d2df4db8ddc6d41297640bab7101..fcc5dfc70aa0c93a635514cb7acececb9e05e2e6 100644 (file)
@@ -30,7 +30,7 @@ extern void xfs_qm_exit(void);
 
 #ifdef CONFIG_XFS_POSIX_ACL
 # define XFS_ACL_STRING                "ACLs, "
-# define set_posix_acl_flag(sb)        ((sb)->s_flags |= MS_POSIXACL)
+# define set_posix_acl_flag(sb)        ((sb)->s_flags |= SB_POSIXACL)
 #else
 # define XFS_ACL_STRING
 # define set_posix_acl_flag(sb)        do { } while (0)
index af2cc94a61bf9e1e7f4e29d6bd1fd1ad997bbf5e..963b755d19b0329e9e15b238db5f62074d615c51 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <linux/compiler.h>
 
+#define CUT_HERE               "------------[ cut here ]------------\n"
+
 #ifdef CONFIG_GENERIC_BUG
 #define BUGFLAG_WARNING                (1 << 0)
 #define BUGFLAG_ONCE           (1 << 1)
@@ -90,10 +92,11 @@ extern void warn_slowpath_null(const char *file, const int line);
 #define __WARN_printf_taint(taint, arg...)                             \
        warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
 #else
+extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
 #define __WARN()               __WARN_TAINT(TAINT_WARN)
-#define __WARN_printf(arg...)  do { printk(arg); __WARN(); } while (0)
+#define __WARN_printf(arg...)  do { __warn_printk(arg); __WARN(); } while (0)
 #define __WARN_printf_taint(taint, arg...)                             \
-       do { printk(arg); __WARN_TAINT(taint); } while (0)
+       do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
 #endif
 
 /* used internally by panic.c */
@@ -130,7 +133,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 
 #ifndef WARN_ON_ONCE
 #define WARN_ON_ONCE(condition)        ({                              \
-       static bool __section(.data.unlikely) __warned;         \
+       static bool __section(.data.once) __warned;             \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
@@ -142,7 +145,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 #endif
 
 #define WARN_ONCE(condition, format...)        ({                      \
-       static bool __section(.data.unlikely) __warned;         \
+       static bool __section(.data.once) __warned;             \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
@@ -153,7 +156,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 })
 
 #define WARN_TAINT_ONCE(condition, taint, format...)   ({      \
-       static bool __section(.data.unlikely) __warned;         \
+       static bool __section(.data.once) __warned;             \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
index 757dc6ffc7ba5f294bae554af3e6d1a01c1207e5..b234d54f2cb6e4c23a21db2af3b225264eccae2a 100644 (file)
@@ -805,15 +805,23 @@ static inline int pmd_trans_huge(pmd_t pmd)
 {
        return 0;
 }
-#ifndef __HAVE_ARCH_PMD_WRITE
+#ifndef pmd_write
 static inline int pmd_write(pmd_t pmd)
 {
        BUG();
        return 0;
 }
-#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* pmd_write */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+       BUG();
+       return 0;
+}
+#endif /* pud_write */
+
 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
        (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
         !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
index 6d95769310842b5712ccb59038a9cc4d91d99ce0..03cc5f9bba71c135f98d804640a9a8c4f2598c65 100644 (file)
@@ -44,6 +44,7 @@ extern char __entry_text_start[], __entry_text_end[];
 extern char __start_rodata[], __end_rodata[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __softirqentry_text_start[], __softirqentry_text_end[];
+extern char __start_once[], __end_once[];
 
 /* Start and end of .ctors section - used for constructor calls. */
 extern char __ctors_start[], __ctors_end[];
index 5d2add1a6c964870b212f848879338f7b9b3ba18..2388737395502101e308ec7c77b51b7f422dc194 100644 (file)
@@ -44,9 +44,6 @@
 #define cpu_to_mem(cpu)                ((void)(cpu),0)
 #endif
 
-#ifndef parent_node
-#define parent_node(node)      ((void)(node),0)
-#endif
 #ifndef cpumask_of_node
   #ifdef CONFIG_NEED_MULTIPLE_NODES
     #define cpumask_of_node(node)      ((node) == 0 ? cpu_online_mask : cpu_none_mask)
index bdcd1caae0923db6e2bd6a964b6e9b00cad3c5ef..ee8b707d9fa9c6b5ed1e15f655c2b8f118868bf2 100644 (file)
        MEM_KEEP(init.data)                                             \
        MEM_KEEP(exit.data)                                             \
        *(.data.unlikely)                                               \
+       VMLINUX_SYMBOL(__start_once) = .;                               \
+       *(.data.once)                                                   \
+       VMLINUX_SYMBOL(__end_once) = .;                                 \
        STRUCT_ALIGN();                                                 \
        *(__tracepoints)                                                \
        /* implement dynamic printk debug */                            \
index 6abf0a3604dc391c3218a063473c737be662aa8a..38d9c5861ed8c110d5dbac1b6a807252b520f23e 100644 (file)
@@ -242,6 +242,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                   unsigned int ivsize);
 ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
                        int offset, size_t size, int flags);
+void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
 unsigned int af_alg_poll(struct file *file, struct socket *sock,
                         poll_table *wait);
index 7a714054301226ffe34de6e22b43e38bd1979ea1..df9807a3caaea4d9f77e0797e594f3a8b12ddd11 100644 (file)
@@ -284,6 +284,11 @@ struct drm_display_info {
         * @hdmi: advance features of a HDMI sink.
         */
        struct drm_hdmi_info hdmi;
+
+       /**
+        * @non_desktop: Non desktop display (HMD).
+        */
+       bool non_desktop;
 };
 
 int drm_display_info_set_bus_formats(struct drm_display_info *info,
index 6f35909b8add3221fe8ad4b4c108e5a1498c7f08..2ec41d032e560f0fa4d04ef8d23d6b3442529473 100644 (file)
@@ -362,7 +362,8 @@ void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
                                   const struct drm_display_mode *mode,
                                   enum hdmi_quantization_range rgb_quant_range,
-                                  bool rgb_quant_range_selectable);
+                                  bool rgb_quant_range_selectable,
+                                  bool is_hdmi2_sink);
 
 /**
  * drm_eld_mnl - Get ELD monitor name length in bytes.
index 0b4ac2ebc6105a0f3d9f364c7bf83eeee2928ad4..b21e827c5c78775742533d28f3baebfd6e0a9b5e 100644 (file)
@@ -728,6 +728,13 @@ struct drm_mode_config {
         */
        struct drm_property *suggested_y_property;
 
+       /**
+        * @non_desktop_property: Optional connector property with a hint
+        * that device isn't a standard display, and the console/desktop,
+        * should not be displayed on it.
+        */
+       struct drm_property *non_desktop_property;
+
        /* dumb ioctl parameters */
        uint32_t preferred_depth, prefer_shadow;
 
index c40111f36d5e282ae9e1d9edb002fede6e346ec0..e9f9d400c322f10f04417d69456de75a4d3be17e 100644 (file)
 /* must be greater than maximal clock id */
 #define CLK_NR_CLKS            461
 
+/* Exynos4x12 ISP clocks */
+#define CLK_ISP_FIMC_ISP                1
+#define CLK_ISP_FIMC_DRC                2
+#define CLK_ISP_FIMC_FD                         3
+#define CLK_ISP_FIMC_LITE0              4
+#define CLK_ISP_FIMC_LITE1              5
+#define CLK_ISP_MCUISP                  6
+#define CLK_ISP_GICISP                  7
+#define CLK_ISP_SMMU_ISP                8
+#define CLK_ISP_SMMU_DRC                9
+#define CLK_ISP_SMMU_FD                        10
+#define CLK_ISP_SMMU_LITE0             11
+#define CLK_ISP_SMMU_LITE1             12
+#define CLK_ISP_PPMUISPMX              13
+#define CLK_ISP_PPMUISPX               14
+#define CLK_ISP_MCUCTL_ISP             15
+#define CLK_ISP_MPWM_ISP               16
+#define CLK_ISP_I2C0_ISP               17
+#define CLK_ISP_I2C1_ISP               18
+#define CLK_ISP_MTCADC_ISP             19
+#define CLK_ISP_PWM_ISP                        20
+#define CLK_ISP_WDT_ISP                        21
+#define CLK_ISP_UART_ISP               22
+#define CLK_ISP_ASYNCAXIM              23
+#define CLK_ISP_SMMU_ISPCX             24
+#define CLK_ISP_SPI0_ISP               25
+#define CLK_ISP_SPI1_ISP               26
+
+#define CLK_ISP_DIV_ISP0               27
+#define CLK_ISP_DIV_ISP1               28
+#define CLK_ISP_DIV_MCUISP0            29
+#define CLK_ISP_DIV_MCUISP1            30
+
+#define CLK_NR_ISP_CLKS                        31
+
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
index 8c92528aa48ad1cd94494d6d0f09074ccff32c3f..8ba99a5e3fd34a64f8581ca51cedd47f11e803c5 100644 (file)
 #define CLKID_SD_EMMC_A_CLK0   119
 #define CLKID_SD_EMMC_B_CLK0   122
 #define CLKID_SD_EMMC_C_CLK0   125
+#define CLKID_VPU_0_SEL                126
+#define CLKID_VPU_0            128
+#define CLKID_VPU_1_SEL                129
+#define CLKID_VPU_1            131
+#define CLKID_VPU              132
+#define CLKID_VAPB_0_SEL       133
+#define CLKID_VAPB_0           135
+#define CLKID_VAPB_1_SEL       136
+#define CLKID_VAPB_1           138
+#define CLKID_VAPB_SEL         139
+#define CLKID_VAPB             140
 
 #endif /* __GXBB_CLKC_H */
index de62a83b6c802661b5f7cecae28a7fbabcd34076..e2f99ae72d5c56c07d0b66c167c1131b510d90c6 100644 (file)
 #define IMX7D_ARM_M4_ROOT_SRC          67
 #define IMX7D_ARM_M4_ROOT_CG           68
 #define IMX7D_ARM_M4_ROOT_DIV          69
-#define IMX7D_ARM_M0_ROOT_CLK          70
-#define IMX7D_ARM_M0_ROOT_SRC          71
-#define IMX7D_ARM_M0_ROOT_CG           72
-#define IMX7D_ARM_M0_ROOT_DIV          73
+#define IMX7D_ARM_M0_ROOT_CLK          70      /* unused */
+#define IMX7D_ARM_M0_ROOT_SRC          71      /* unused */
+#define IMX7D_ARM_M0_ROOT_CG           72      /* unused */
+#define IMX7D_ARM_M0_ROOT_DIV          73      /* unused */
 #define IMX7D_MAIN_AXI_ROOT_CLK                74
 #define IMX7D_MAIN_AXI_ROOT_SRC                75
 #define IMX7D_MAIN_AXI_ROOT_CG         76
diff --git a/include/dt-bindings/clock/mt2712-clk.h b/include/dt-bindings/clock/mt2712-clk.h
new file mode 100644 (file)
index 0000000..48a8e79
--- /dev/null
+++ b/include/dt-bindings/clock/mt2712-clk.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT2712_H
+#define _DT_BINDINGS_CLK_MT2712_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_MAINPLL            0
+#define CLK_APMIXED_UNIVPLL            1
+#define CLK_APMIXED_VCODECPLL          2
+#define CLK_APMIXED_VENCPLL            3
+#define CLK_APMIXED_APLL1              4
+#define CLK_APMIXED_APLL2              5
+#define CLK_APMIXED_LVDSPLL            6
+#define CLK_APMIXED_LVDSPLL2           7
+#define CLK_APMIXED_MSDCPLL            8
+#define CLK_APMIXED_MSDCPLL2           9
+#define CLK_APMIXED_TVDPLL             10
+#define CLK_APMIXED_MMPLL              11
+#define CLK_APMIXED_ARMCA35PLL         12
+#define CLK_APMIXED_ARMCA72PLL         13
+#define CLK_APMIXED_ETHERPLL           14
+#define CLK_APMIXED_NR_CLK             15
+
+/* TOPCKGEN */
+
+#define CLK_TOP_ARMCA35PLL             0
+#define CLK_TOP_ARMCA35PLL_600M                1
+#define CLK_TOP_ARMCA35PLL_400M                2
+#define CLK_TOP_ARMCA72PLL             3
+#define CLK_TOP_SYSPLL                 4
+#define CLK_TOP_SYSPLL_D2              5
+#define CLK_TOP_SYSPLL1_D2             6
+#define CLK_TOP_SYSPLL1_D4             7
+#define CLK_TOP_SYSPLL1_D8             8
+#define CLK_TOP_SYSPLL1_D16            9
+#define CLK_TOP_SYSPLL_D3              10
+#define CLK_TOP_SYSPLL2_D2             11
+#define CLK_TOP_SYSPLL2_D4             12
+#define CLK_TOP_SYSPLL_D5              13
+#define CLK_TOP_SYSPLL3_D2             14
+#define CLK_TOP_SYSPLL3_D4             15
+#define CLK_TOP_SYSPLL_D7              16
+#define CLK_TOP_SYSPLL4_D2             17
+#define CLK_TOP_SYSPLL4_D4             18
+#define CLK_TOP_UNIVPLL                        19
+#define CLK_TOP_UNIVPLL_D7             20
+#define CLK_TOP_UNIVPLL_D26            21
+#define CLK_TOP_UNIVPLL_D52            22
+#define CLK_TOP_UNIVPLL_D104           23
+#define CLK_TOP_UNIVPLL_D208           24
+#define CLK_TOP_UNIVPLL_D2             25
+#define CLK_TOP_UNIVPLL1_D2            26
+#define CLK_TOP_UNIVPLL1_D4            27
+#define CLK_TOP_UNIVPLL1_D8            28
+#define CLK_TOP_UNIVPLL_D3             29
+#define CLK_TOP_UNIVPLL2_D2            30
+#define CLK_TOP_UNIVPLL2_D4            31
+#define CLK_TOP_UNIVPLL2_D8            32
+#define CLK_TOP_UNIVPLL_D5             33
+#define CLK_TOP_UNIVPLL3_D2            34
+#define CLK_TOP_UNIVPLL3_D4            35
+#define CLK_TOP_UNIVPLL3_D8            36
+#define CLK_TOP_F_MP0_PLL1             37
+#define CLK_TOP_F_MP0_PLL2             38
+#define CLK_TOP_F_BIG_PLL1             39
+#define CLK_TOP_F_BIG_PLL2             40
+#define CLK_TOP_F_BUS_PLL1             41
+#define CLK_TOP_F_BUS_PLL2             42
+#define CLK_TOP_APLL1                  43
+#define CLK_TOP_APLL1_D2               44
+#define CLK_TOP_APLL1_D4               45
+#define CLK_TOP_APLL1_D8               46
+#define CLK_TOP_APLL1_D16              47
+#define CLK_TOP_APLL2                  48
+#define CLK_TOP_APLL2_D2               49
+#define CLK_TOP_APLL2_D4               50
+#define CLK_TOP_APLL2_D8               51
+#define CLK_TOP_APLL2_D16              52
+#define CLK_TOP_LVDSPLL                        53
+#define CLK_TOP_LVDSPLL_D2             54
+#define CLK_TOP_LVDSPLL_D4             55
+#define CLK_TOP_LVDSPLL_D8             56
+#define CLK_TOP_LVDSPLL2               57
+#define CLK_TOP_LVDSPLL2_D2            58
+#define CLK_TOP_LVDSPLL2_D4            59
+#define CLK_TOP_LVDSPLL2_D8            60
+#define CLK_TOP_ETHERPLL_125M          61
+#define CLK_TOP_ETHERPLL_50M           62
+#define CLK_TOP_CVBS                   63
+#define CLK_TOP_CVBS_D2                        64
+#define CLK_TOP_SYS_26M                        65
+#define CLK_TOP_MMPLL                  66
+#define CLK_TOP_MMPLL_D2               67
+#define CLK_TOP_VENCPLL                        68
+#define CLK_TOP_VENCPLL_D2             69
+#define CLK_TOP_VCODECPLL              70
+#define CLK_TOP_VCODECPLL_D2           71
+#define CLK_TOP_TVDPLL                 72
+#define CLK_TOP_TVDPLL_D2              73
+#define CLK_TOP_TVDPLL_D4              74
+#define CLK_TOP_TVDPLL_D8              75
+#define CLK_TOP_TVDPLL_429M            76
+#define CLK_TOP_TVDPLL_429M_D2         77
+#define CLK_TOP_TVDPLL_429M_D4         78
+#define CLK_TOP_MSDCPLL                        79
+#define CLK_TOP_MSDCPLL_D2             80
+#define CLK_TOP_MSDCPLL_D4             81
+#define CLK_TOP_MSDCPLL2               82
+#define CLK_TOP_MSDCPLL2_D2            83
+#define CLK_TOP_MSDCPLL2_D4            84
+#define CLK_TOP_CLK26M_D2              85
+#define CLK_TOP_D2A_ULCLK_6P5M         86
+#define CLK_TOP_VPLL3_DPIX             87
+#define CLK_TOP_VPLL_DPIX              88
+#define CLK_TOP_LTEPLL_FS26M           89
+#define CLK_TOP_DMPLL                  90
+#define CLK_TOP_DSI0_LNTC              91
+#define CLK_TOP_DSI1_LNTC              92
+#define CLK_TOP_LVDSTX3_CLKDIG_CTS     93
+#define CLK_TOP_LVDSTX_CLKDIG_CTS      94
+#define CLK_TOP_CLKRTC_EXT             95
+#define CLK_TOP_CLKRTC_INT             96
+#define CLK_TOP_CSI0                   97
+#define CLK_TOP_CVBSPLL                        98
+#define CLK_TOP_AXI_SEL                        99
+#define CLK_TOP_MEM_SEL                        100
+#define CLK_TOP_MM_SEL                 101
+#define CLK_TOP_PWM_SEL                        102
+#define CLK_TOP_VDEC_SEL               103
+#define CLK_TOP_VENC_SEL               104
+#define CLK_TOP_MFG_SEL                        105
+#define CLK_TOP_CAMTG_SEL              106
+#define CLK_TOP_UART_SEL               107
+#define CLK_TOP_SPI_SEL                        108
+#define CLK_TOP_USB20_SEL              109
+#define CLK_TOP_USB30_SEL              110
+#define CLK_TOP_MSDC50_0_HCLK_SEL      111
+#define CLK_TOP_MSDC50_0_SEL           112
+#define CLK_TOP_MSDC30_1_SEL           113
+#define CLK_TOP_MSDC30_2_SEL           114
+#define CLK_TOP_MSDC30_3_SEL           115
+#define CLK_TOP_AUDIO_SEL              116
+#define CLK_TOP_AUD_INTBUS_SEL         117
+#define CLK_TOP_PMICSPI_SEL            118
+#define CLK_TOP_DPILVDS1_SEL           119
+#define CLK_TOP_ATB_SEL                        120
+#define CLK_TOP_NR_SEL                 121
+#define CLK_TOP_NFI2X_SEL              122
+#define CLK_TOP_IRDA_SEL               123
+#define CLK_TOP_CCI400_SEL             124
+#define CLK_TOP_AUD_1_SEL              125
+#define CLK_TOP_AUD_2_SEL              126
+#define CLK_TOP_MEM_MFG_IN_AS_SEL      127
+#define CLK_TOP_AXI_MFG_IN_AS_SEL      128
+#define CLK_TOP_SCAM_SEL               129
+#define CLK_TOP_NFIECC_SEL             130
+#define CLK_TOP_PE2_MAC_P0_SEL         131
+#define CLK_TOP_PE2_MAC_P1_SEL         132
+#define CLK_TOP_DPILVDS_SEL            133
+#define CLK_TOP_MSDC50_3_HCLK_SEL      134
+#define CLK_TOP_HDCP_SEL               135
+#define CLK_TOP_HDCP_24M_SEL           136
+#define CLK_TOP_RTC_SEL                        137
+#define CLK_TOP_SPINOR_SEL             138
+#define CLK_TOP_APLL_SEL               139
+#define CLK_TOP_APLL2_SEL              140
+#define CLK_TOP_A1SYS_HP_SEL           141
+#define CLK_TOP_A2SYS_HP_SEL           142
+#define CLK_TOP_ASM_L_SEL              143
+#define CLK_TOP_ASM_M_SEL              144
+#define CLK_TOP_ASM_H_SEL              145
+#define CLK_TOP_I2SO1_SEL              146
+#define CLK_TOP_I2SO2_SEL              147
+#define CLK_TOP_I2SO3_SEL              148
+#define CLK_TOP_TDMO0_SEL              149
+#define CLK_TOP_TDMO1_SEL              150
+#define CLK_TOP_I2SI1_SEL              151
+#define CLK_TOP_I2SI2_SEL              152
+#define CLK_TOP_I2SI3_SEL              153
+#define CLK_TOP_ETHER_125M_SEL         154
+#define CLK_TOP_ETHER_50M_SEL          155
+#define CLK_TOP_JPGDEC_SEL             156
+#define CLK_TOP_SPISLV_SEL             157
+#define CLK_TOP_ETHER_50M_RMII_SEL     158
+#define CLK_TOP_CAM2TG_SEL             159
+#define CLK_TOP_DI_SEL                 160
+#define CLK_TOP_TVD_SEL                        161
+#define CLK_TOP_I2C_SEL                        162
+#define CLK_TOP_PWM_INFRA_SEL          163
+#define CLK_TOP_MSDC0P_AES_SEL         164
+#define CLK_TOP_CMSYS_SEL              165
+#define CLK_TOP_GCPU_SEL               166
+#define CLK_TOP_AUD_APLL1_SEL          167
+#define CLK_TOP_AUD_APLL2_SEL          168
+#define CLK_TOP_DA_AUDULL_VTX_6P5M_SEL 169
+#define CLK_TOP_APLL_DIV0              170
+#define CLK_TOP_APLL_DIV1              171
+#define CLK_TOP_APLL_DIV2              172
+#define CLK_TOP_APLL_DIV3              173
+#define CLK_TOP_APLL_DIV4              174
+#define CLK_TOP_APLL_DIV5              175
+#define CLK_TOP_APLL_DIV6              176
+#define CLK_TOP_APLL_DIV7              177
+#define CLK_TOP_APLL_DIV_PDN0          178
+#define CLK_TOP_APLL_DIV_PDN1          179
+#define CLK_TOP_APLL_DIV_PDN2          180
+#define CLK_TOP_APLL_DIV_PDN3          181
+#define CLK_TOP_APLL_DIV_PDN4          182
+#define CLK_TOP_APLL_DIV_PDN5          183
+#define CLK_TOP_APLL_DIV_PDN6          184
+#define CLK_TOP_APLL_DIV_PDN7          185
+#define CLK_TOP_NR_CLK                 186
+
+/* INFRACFG */
+
+#define CLK_INFRA_DBGCLK               0
+#define CLK_INFRA_GCE                  1
+#define CLK_INFRA_M4U                  2
+#define CLK_INFRA_KP                   3
+#define CLK_INFRA_AO_SPI0              4
+#define CLK_INFRA_AO_SPI1              5
+#define CLK_INFRA_AO_UART5             6
+#define CLK_INFRA_NR_CLK               7
+
+/* PERICFG */
+
+#define CLK_PERI_NFI                   0
+#define CLK_PERI_THERM                 1
+#define CLK_PERI_PWM0                  2
+#define CLK_PERI_PWM1                  3
+#define CLK_PERI_PWM2                  4
+#define CLK_PERI_PWM3                  5
+#define CLK_PERI_PWM4                  6
+#define CLK_PERI_PWM5                  7
+#define CLK_PERI_PWM6                  8
+#define CLK_PERI_PWM7                  9
+#define CLK_PERI_PWM                   10
+#define CLK_PERI_AP_DMA                        11
+#define CLK_PERI_MSDC30_0              12
+#define CLK_PERI_MSDC30_1              13
+#define CLK_PERI_MSDC30_2              14
+#define CLK_PERI_MSDC30_3              15
+#define CLK_PERI_UART0                 16
+#define CLK_PERI_UART1                 17
+#define CLK_PERI_UART2                 18
+#define CLK_PERI_UART3                 19
+#define CLK_PERI_I2C0                  20
+#define CLK_PERI_I2C1                  21
+#define CLK_PERI_I2C2                  22
+#define CLK_PERI_I2C3                  23
+#define CLK_PERI_I2C4                  24
+#define CLK_PERI_AUXADC                        25
+#define CLK_PERI_SPI0                  26
+#define CLK_PERI_SPI                   27
+#define CLK_PERI_I2C5                  28
+#define CLK_PERI_SPI2                  29
+#define CLK_PERI_SPI3                  30
+#define CLK_PERI_SPI5                  31
+#define CLK_PERI_UART4                 32
+#define CLK_PERI_SFLASH                        33
+#define CLK_PERI_GMAC                  34
+#define CLK_PERI_PCIE0                 35
+#define CLK_PERI_PCIE1                 36
+#define CLK_PERI_GMAC_PCLK             37
+#define CLK_PERI_MSDC50_0_EN           38
+#define CLK_PERI_MSDC30_1_EN           39
+#define CLK_PERI_MSDC30_2_EN           40
+#define CLK_PERI_MSDC30_3_EN           41
+#define CLK_PERI_MSDC50_0_HCLK_EN      42
+#define CLK_PERI_MSDC50_3_HCLK_EN      43
+#define CLK_PERI_NR_CLK                        44
+
+/* MCUCFG */
+
+#define CLK_MCU_MP0_SEL                        0
+#define CLK_MCU_MP2_SEL                        1
+#define CLK_MCU_BUS_SEL                        2
+#define CLK_MCU_NR_CLK                 3
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D                   0
+#define CLK_MFG_NR_CLK                 1
+
+/* MMSYS */
+
+#define CLK_MM_SMI_COMMON              0
+#define CLK_MM_SMI_LARB0               1
+#define CLK_MM_CAM_MDP                 2
+#define CLK_MM_MDP_RDMA0               3
+#define CLK_MM_MDP_RDMA1               4
+#define CLK_MM_MDP_RSZ0                        5
+#define CLK_MM_MDP_RSZ1                        6
+#define CLK_MM_MDP_RSZ2                        7
+#define CLK_MM_MDP_TDSHP0              8
+#define CLK_MM_MDP_TDSHP1              9
+#define CLK_MM_MDP_CROP                        10
+#define CLK_MM_MDP_WDMA                        11
+#define CLK_MM_MDP_WROT0               12
+#define CLK_MM_MDP_WROT1               13
+#define CLK_MM_FAKE_ENG                        14
+#define CLK_MM_MUTEX_32K               15
+#define CLK_MM_DISP_OVL0               16
+#define CLK_MM_DISP_OVL1               17
+#define CLK_MM_DISP_RDMA0              18
+#define CLK_MM_DISP_RDMA1              19
+#define CLK_MM_DISP_RDMA2              20
+#define CLK_MM_DISP_WDMA0              21
+#define CLK_MM_DISP_WDMA1              22
+#define CLK_MM_DISP_COLOR0             23
+#define CLK_MM_DISP_COLOR1             24
+#define CLK_MM_DISP_AAL                        25
+#define CLK_MM_DISP_GAMMA              26
+#define CLK_MM_DISP_UFOE               27
+#define CLK_MM_DISP_SPLIT0             28
+#define CLK_MM_DISP_OD                 29
+#define CLK_MM_DISP_PWM0_MM            30
+#define CLK_MM_DISP_PWM0_26M           31
+#define CLK_MM_DISP_PWM1_MM            32
+#define CLK_MM_DISP_PWM1_26M           33
+#define CLK_MM_DSI0_ENGINE             34
+#define CLK_MM_DSI0_DIGITAL            35
+#define CLK_MM_DSI1_ENGINE             36
+#define CLK_MM_DSI1_DIGITAL            37
+#define CLK_MM_DPI_PIXEL               38
+#define CLK_MM_DPI_ENGINE              39
+#define CLK_MM_DPI1_PIXEL              40
+#define CLK_MM_DPI1_ENGINE             41
+#define CLK_MM_LVDS_PIXEL              42
+#define CLK_MM_LVDS_CTS                        43
+#define CLK_MM_SMI_LARB4               44
+#define CLK_MM_SMI_COMMON1             45
+#define CLK_MM_SMI_LARB5               46
+#define CLK_MM_MDP_RDMA2               47
+#define CLK_MM_MDP_TDSHP2              48
+#define CLK_MM_DISP_OVL2               49
+#define CLK_MM_DISP_WDMA2              50
+#define CLK_MM_DISP_COLOR2             51
+#define CLK_MM_DISP_AAL1               52
+#define CLK_MM_DISP_OD1                        53
+#define CLK_MM_LVDS1_PIXEL             54
+#define CLK_MM_LVDS1_CTS               55
+#define CLK_MM_SMI_LARB7               56
+#define CLK_MM_MDP_RDMA3               57
+#define CLK_MM_MDP_WROT2               58
+#define CLK_MM_DSI2                    59
+#define CLK_MM_DSI2_DIGITAL            60
+#define CLK_MM_DSI3                    61
+#define CLK_MM_DSI3_DIGITAL            62
+#define CLK_MM_NR_CLK                  63
+
+/* IMGSYS */
+
+#define CLK_IMG_SMI_LARB2              0
+#define CLK_IMG_SENINF_SCAM_EN         1
+#define CLK_IMG_SENINF_CAM_EN          2
+#define CLK_IMG_CAM_SV_EN              3
+#define CLK_IMG_CAM_SV1_EN             4
+#define CLK_IMG_CAM_SV2_EN             5
+#define CLK_IMG_NR_CLK                 6
+
+/* BDPSYS */
+
+#define CLK_BDP_BRIDGE_B               0
+#define CLK_BDP_BRIDGE_DRAM            1
+#define CLK_BDP_LARB_DRAM              2
+#define CLK_BDP_WR_CHANNEL_VDI_PXL     3
+#define CLK_BDP_WR_CHANNEL_VDI_DRAM    4
+#define CLK_BDP_WR_CHANNEL_VDI_B       5
+#define CLK_BDP_MT_B                   6
+#define CLK_BDP_DISPFMT_27M            7
+#define CLK_BDP_DISPFMT_27M_VDOUT      8
+#define CLK_BDP_DISPFMT_27_74_74       9
+#define CLK_BDP_DISPFMT_2FS            10
+#define CLK_BDP_DISPFMT_2FS_2FS74_148  11
+#define CLK_BDP_DISPFMT_B              12
+#define CLK_BDP_VDO_DRAM               13
+#define CLK_BDP_VDO_2FS                        14
+#define CLK_BDP_VDO_B                  15
+#define CLK_BDP_WR_CHANNEL_DI_PXL      16
+#define CLK_BDP_WR_CHANNEL_DI_DRAM     17
+#define CLK_BDP_WR_CHANNEL_DI_B                18
+#define CLK_BDP_NR_AGENT               19
+#define CLK_BDP_NR_DRAM                        20
+#define CLK_BDP_NR_B                   21
+#define CLK_BDP_BRIDGE_RT_B            22
+#define CLK_BDP_BRIDGE_RT_DRAM         23
+#define CLK_BDP_LARB_RT_DRAM           24
+#define CLK_BDP_TVD_TDC                        25
+#define CLK_BDP_TVD_54                 26
+#define CLK_BDP_TVD_CBUS               27
+#define CLK_BDP_NR_CLK                 28
+
+/* VDECSYS */
+
+#define CLK_VDEC_CKEN                  0
+#define CLK_VDEC_LARB1_CKEN            1
+#define CLK_VDEC_IMGRZ_CKEN            2
+#define CLK_VDEC_NR_CLK                        3
+
+/* VENCSYS */
+
+#define CLK_VENC_SMI_COMMON_CON                0
+#define CLK_VENC_VENC                  1
+#define CLK_VENC_SMI_LARB6             2
+#define CLK_VENC_NR_CLK                        3
+
+/* JPGDECSYS */
+
+#define CLK_JPGDEC_JPGDEC1             0
+#define CLK_JPGDEC_JPGDEC              1
+#define CLK_JPGDEC_NR_CLK              2
+
+#endif /* _DT_BINDINGS_CLK_MT2712_H */
diff --git a/include/dt-bindings/clock/mt7622-clk.h b/include/dt-bindings/clock/mt7622-clk.h
new file mode 100644 (file)
index 0000000..3e514ed
--- /dev/null
+++ b/include/dt-bindings/clock/mt7622-clk.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7622_H
+#define _DT_BINDINGS_CLK_MT7622_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_TO_U2_PHY              0
+#define CLK_TOP_TO_U2_PHY_1P           1
+#define CLK_TOP_PCIE0_PIPE_EN          2
+#define CLK_TOP_PCIE1_PIPE_EN          3
+#define CLK_TOP_SSUSB_TX250M           4
+#define CLK_TOP_SSUSB_EQ_RX250M                5
+#define CLK_TOP_SSUSB_CDR_REF          6
+#define CLK_TOP_SSUSB_CDR_FB           7
+#define CLK_TOP_SATA_ASIC              8
+#define CLK_TOP_SATA_RBC               9
+#define CLK_TOP_TO_USB3_SYS            10
+#define CLK_TOP_P1_1MHZ                        11
+#define CLK_TOP_4MHZ                   12
+#define CLK_TOP_P0_1MHZ                        13
+#define CLK_TOP_TXCLK_SRC_PRE          14
+#define CLK_TOP_RTC                    15
+#define CLK_TOP_MEMPLL                 16
+#define CLK_TOP_DMPLL                  17
+#define CLK_TOP_SYSPLL_D2              18
+#define CLK_TOP_SYSPLL1_D2             19
+#define CLK_TOP_SYSPLL1_D4             20
+#define CLK_TOP_SYSPLL1_D8             21
+#define CLK_TOP_SYSPLL2_D4             22
+#define CLK_TOP_SYSPLL2_D8             23
+#define CLK_TOP_SYSPLL_D5              24
+#define CLK_TOP_SYSPLL3_D2             25
+#define CLK_TOP_SYSPLL3_D4             26
+#define CLK_TOP_SYSPLL4_D2             27
+#define CLK_TOP_SYSPLL4_D4             28
+#define CLK_TOP_SYSPLL4_D16            29
+#define CLK_TOP_UNIVPLL                        30
+#define CLK_TOP_UNIVPLL_D2             31
+#define CLK_TOP_UNIVPLL1_D2            32
+#define CLK_TOP_UNIVPLL1_D4            33
+#define CLK_TOP_UNIVPLL1_D8            34
+#define CLK_TOP_UNIVPLL1_D16           35
+#define CLK_TOP_UNIVPLL2_D2            36
+#define CLK_TOP_UNIVPLL2_D4            37
+#define CLK_TOP_UNIVPLL2_D8            38
+#define CLK_TOP_UNIVPLL2_D16           39
+#define CLK_TOP_UNIVPLL_D5             40
+#define CLK_TOP_UNIVPLL3_D2            41
+#define CLK_TOP_UNIVPLL3_D4            42
+#define CLK_TOP_UNIVPLL3_D16           43
+#define CLK_TOP_UNIVPLL_D7             44
+#define CLK_TOP_UNIVPLL_D80_D4         45
+#define CLK_TOP_UNIV48M                        46
+#define CLK_TOP_SGMIIPLL               47
+#define CLK_TOP_SGMIIPLL_D2            48
+#define CLK_TOP_AUD1PLL                        49
+#define CLK_TOP_AUD2PLL                        50
+#define CLK_TOP_AUD_I2S2_MCK           51
+#define CLK_TOP_TO_USB3_REF            52
+#define CLK_TOP_PCIE1_MAC_EN           53
+#define CLK_TOP_PCIE0_MAC_EN           54
+#define CLK_TOP_ETH_500M               55
+#define CLK_TOP_AXI_SEL                        56
+#define CLK_TOP_MEM_SEL                        57
+#define CLK_TOP_DDRPHYCFG_SEL          58
+#define CLK_TOP_ETH_SEL                        59
+#define CLK_TOP_PWM_SEL                        60
+#define CLK_TOP_F10M_REF_SEL           61
+#define CLK_TOP_NFI_INFRA_SEL          62
+#define CLK_TOP_FLASH_SEL              63
+#define CLK_TOP_UART_SEL               64
+#define CLK_TOP_SPI0_SEL               65
+#define CLK_TOP_SPI1_SEL               66
+#define CLK_TOP_MSDC50_0_SEL           67
+#define CLK_TOP_MSDC30_0_SEL           68
+#define CLK_TOP_MSDC30_1_SEL           69
+#define CLK_TOP_A1SYS_HP_SEL           70
+#define CLK_TOP_A2SYS_HP_SEL           71
+#define CLK_TOP_INTDIR_SEL             72
+#define CLK_TOP_AUD_INTBUS_SEL         73
+#define CLK_TOP_PMICSPI_SEL            74
+#define CLK_TOP_SCP_SEL                        75
+#define CLK_TOP_ATB_SEL                        76
+#define CLK_TOP_HIF_SEL                        77
+#define CLK_TOP_AUDIO_SEL              78
+#define CLK_TOP_U2_SEL                 79
+#define CLK_TOP_AUD1_SEL               80
+#define CLK_TOP_AUD2_SEL               81
+#define CLK_TOP_IRRX_SEL               82
+#define CLK_TOP_IRTX_SEL               83
+#define CLK_TOP_ASM_L_SEL              84
+#define CLK_TOP_ASM_M_SEL              85
+#define CLK_TOP_ASM_H_SEL              86
+#define CLK_TOP_APLL1_SEL              87
+#define CLK_TOP_APLL2_SEL              88
+#define CLK_TOP_I2S0_MCK_SEL           89
+#define CLK_TOP_I2S1_MCK_SEL           90
+#define CLK_TOP_I2S2_MCK_SEL           91
+#define CLK_TOP_I2S3_MCK_SEL           92
+#define CLK_TOP_APLL1_DIV              93
+#define CLK_TOP_APLL2_DIV              94
+#define CLK_TOP_I2S0_MCK_DIV           95
+#define CLK_TOP_I2S1_MCK_DIV           96
+#define CLK_TOP_I2S2_MCK_DIV           97
+#define CLK_TOP_I2S3_MCK_DIV           98
+#define CLK_TOP_A1SYS_HP_DIV           99
+#define CLK_TOP_A2SYS_HP_DIV           100
+#define CLK_TOP_APLL1_DIV_PD           101
+#define CLK_TOP_APLL2_DIV_PD           102
+#define CLK_TOP_I2S0_MCK_DIV_PD                103
+#define CLK_TOP_I2S1_MCK_DIV_PD                104
+#define CLK_TOP_I2S2_MCK_DIV_PD                105
+#define CLK_TOP_I2S3_MCK_DIV_PD                106
+#define CLK_TOP_A1SYS_HP_DIV_PD                107
+#define CLK_TOP_A2SYS_HP_DIV_PD                108
+#define CLK_TOP_NR_CLK                 109
+
+/* INFRACFG */
+
+#define CLK_INFRA_MUX1_SEL             0
+#define CLK_INFRA_DBGCLK_PD            1
+#define CLK_INFRA_AUDIO_PD             2
+#define CLK_INFRA_IRRX_PD              3
+#define CLK_INFRA_APXGPT_PD            4
+#define CLK_INFRA_PMIC_PD              5
+#define CLK_INFRA_TRNG                 6
+#define CLK_INFRA_NR_CLK               7
+
+/* PERICFG */
+
+#define CLK_PERIBUS_SEL                        0
+#define CLK_PERI_THERM_PD              1
+#define CLK_PERI_PWM1_PD               2
+#define CLK_PERI_PWM2_PD               3
+#define CLK_PERI_PWM3_PD               4
+#define CLK_PERI_PWM4_PD               5
+#define CLK_PERI_PWM5_PD               6
+#define CLK_PERI_PWM6_PD               7
+#define CLK_PERI_PWM7_PD               8
+#define CLK_PERI_PWM_PD                        9
+#define CLK_PERI_AP_DMA_PD             10
+#define CLK_PERI_MSDC30_0_PD           11
+#define CLK_PERI_MSDC30_1_PD           12
+#define CLK_PERI_UART0_PD              13
+#define CLK_PERI_UART1_PD              14
+#define CLK_PERI_UART2_PD              15
+#define CLK_PERI_UART3_PD              16
+#define CLK_PERI_UART4_PD              17
+#define CLK_PERI_BTIF_PD               18
+#define CLK_PERI_I2C0_PD               19
+#define CLK_PERI_I2C1_PD               20
+#define CLK_PERI_I2C2_PD               21
+#define CLK_PERI_SPI1_PD               22
+#define CLK_PERI_AUXADC_PD             23
+#define CLK_PERI_SPI0_PD               24
+#define CLK_PERI_SNFI_PD               25
+#define CLK_PERI_NFI_PD                        26
+#define CLK_PERI_NFIECC_PD             27
+#define CLK_PERI_FLASH_PD              28
+#define CLK_PERI_IRTX_PD               29
+#define CLK_PERI_NR_CLK                        30
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL             0
+#define CLK_APMIXED_MAINPLL            1
+#define CLK_APMIXED_UNIV2PLL           2
+#define CLK_APMIXED_ETH1PLL            3
+#define CLK_APMIXED_ETH2PLL            4
+#define CLK_APMIXED_AUD1PLL            5
+#define CLK_APMIXED_AUD2PLL            6
+#define CLK_APMIXED_TRGPLL             7
+#define CLK_APMIXED_SGMIPLL            8
+#define CLK_APMIXED_MAIN_CORE_EN       9
+#define CLK_APMIXED_NR_CLK             10
+
+/* AUDIOSYS */
+
+#define CLK_AUDIO_AFE                  0
+#define CLK_AUDIO_HDMI                 1
+#define CLK_AUDIO_SPDF                 2
+#define CLK_AUDIO_APLL                 3
+#define CLK_AUDIO_I2SIN1               4
+#define CLK_AUDIO_I2SIN2               5
+#define CLK_AUDIO_I2SIN3               6
+#define CLK_AUDIO_I2SIN4               7
+#define CLK_AUDIO_I2SO1                        8
+#define CLK_AUDIO_I2SO2                        9
+#define CLK_AUDIO_I2SO3                        10
+#define CLK_AUDIO_I2SO4                        11
+#define CLK_AUDIO_ASRCI1               12
+#define CLK_AUDIO_ASRCI2               13
+#define CLK_AUDIO_ASRCO1               14
+#define CLK_AUDIO_ASRCO2               15
+#define CLK_AUDIO_INTDIR               16
+#define CLK_AUDIO_A1SYS                        17
+#define CLK_AUDIO_A2SYS                        18
+#define CLK_AUDIO_UL1                  19
+#define CLK_AUDIO_UL2                  20
+#define CLK_AUDIO_UL3                  21
+#define CLK_AUDIO_UL4                  22
+#define CLK_AUDIO_UL5                  23
+#define CLK_AUDIO_UL6                  24
+#define CLK_AUDIO_DL1                  25
+#define CLK_AUDIO_DL2                  26
+#define CLK_AUDIO_DL3                  27
+#define CLK_AUDIO_DL4                  28
+#define CLK_AUDIO_DL5                  29
+#define CLK_AUDIO_DL6                  30
+#define CLK_AUDIO_DLMCH                        31
+#define CLK_AUDIO_ARB1                 32
+#define CLK_AUDIO_AWB                  33
+#define CLK_AUDIO_AWB2                 34
+#define CLK_AUDIO_DAI                  35
+#define CLK_AUDIO_MOD                  36
+#define CLK_AUDIO_ASRCI3               37
+#define CLK_AUDIO_ASRCI4               38
+#define CLK_AUDIO_ASRCO3               39
+#define CLK_AUDIO_ASRCO4               40
+#define CLK_AUDIO_MEM_ASRC1            41
+#define CLK_AUDIO_MEM_ASRC2            42
+#define CLK_AUDIO_MEM_ASRC3            43
+#define CLK_AUDIO_MEM_ASRC4            44
+#define CLK_AUDIO_MEM_ASRC5            45
+#define CLK_AUDIO_NR_CLK               46
+
+/* SSUSBSYS */
+
+#define CLK_SSUSB_U2_PHY_1P_EN         0
+#define CLK_SSUSB_U2_PHY_EN            1
+#define CLK_SSUSB_REF_EN               2
+#define CLK_SSUSB_SYS_EN               3
+#define CLK_SSUSB_MCU_EN               4
+#define CLK_SSUSB_DMA_EN               5
+#define CLK_SSUSB_NR_CLK               6
+
+/* PCIESYS */
+
+#define CLK_PCIE_P1_AUX_EN             0
+#define CLK_PCIE_P1_OBFF_EN            1
+#define CLK_PCIE_P1_AHB_EN             2
+#define CLK_PCIE_P1_AXI_EN             3
+#define CLK_PCIE_P1_MAC_EN             4
+#define CLK_PCIE_P1_PIPE_EN            5
+#define CLK_PCIE_P0_AUX_EN             6
+#define CLK_PCIE_P0_OBFF_EN            7
+#define CLK_PCIE_P0_AHB_EN             8
+#define CLK_PCIE_P0_AXI_EN             9
+#define CLK_PCIE_P0_MAC_EN             10
+#define CLK_PCIE_P0_PIPE_EN            11
+#define CLK_SATA_AHB_EN                        12
+#define CLK_SATA_AXI_EN                        13
+#define CLK_SATA_ASIC_EN               14
+#define CLK_SATA_RBC_EN                        15
+#define CLK_SATA_PM_EN                 16
+#define CLK_PCIE_NR_CLK                        17
+
+/* ETHSYS */
+
+#define CLK_ETH_HSDMA_EN               0
+#define CLK_ETH_ESW_EN                 1
+#define CLK_ETH_GP2_EN                 2
+#define CLK_ETH_GP1_EN                 3
+#define CLK_ETH_GP0_EN                 4
+#define CLK_ETH_NR_CLK                 5
+
+/* SGMIISYS */
+
+#define CLK_SGMII_TX250M_EN            0
+#define CLK_SGMII_RX250M_EN            1
+#define CLK_SGMII_CDR_REF              2
+#define CLK_SGMII_CDR_FB               3
+#define CLK_SGMII_NR_CLK               4
+
+#endif /* _DT_BINDINGS_CLK_MT7622_H */
+
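The constants above are clock specifiers intended to be included from device tree sources. As a rough, hypothetical sketch of how a consumer node would reference them (the node address, the provider labels topckgen/pericfg and the clock-names are assumptions for illustration, not taken from this commit):

	#include <dt-bindings/clock/mt7622-clk.h>

	serial@11002000 {
		/* first cell selects the clock ID defined in mt7622-clk.h */
		clocks = <&topckgen CLK_TOP_UART_SEL>,
			 <&pericfg CLK_PERI_UART0_PD>;
		clock-names = "baud", "bus";
	};
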
index 96b63c00249eeb201fef1f994905d586b5608c72..b8337a5fa34775330e28fb1a6c42aae20b520489 100644
@@ -37,6 +37,9 @@
 #define RPM_SYS_FABRIC_A_CLK                   19
 #define RPM_SFPB_CLK                           20
 #define RPM_SFPB_A_CLK                         21
+#define RPM_SMI_CLK                            22
+#define RPM_SMI_A_CLK                          23
+#define RPM_PLL4_CLK                           24
 
 /* SMD RPM clocks */
 #define RPM_SMD_XO_CLK_SRC                             0
 #define RPM_SMD_CXO_A1_A_PIN                   59
 #define RPM_SMD_CXO_A2_PIN                     60
 #define RPM_SMD_CXO_A2_A_PIN                   61
+#define RPM_SMD_AGGR1_NOC_CLK                  62
+#define RPM_SMD_AGGR1_NOC_A_CLK                        63
+#define RPM_SMD_AGGR2_NOC_CLK                  64
+#define RPM_SMD_AGGR2_NOC_A_CLK                        65
+#define RPM_SMD_MMAXI_CLK                      66
+#define RPM_SMD_MMAXI_A_CLK                    67
+#define RPM_SMD_IPA_CLK                                68
+#define RPM_SMD_IPA_A_CLK                      69
+#define RPM_SMD_CE1_CLK                                70
+#define RPM_SMD_CE1_A_CLK                      71
+#define RPM_SMD_DIV_CLK3                       72
+#define RPM_SMD_DIV_A_CLK3                     73
+#define RPM_SMD_LN_BB_CLK                      74
+#define RPM_SMD_LN_BB_A_CLK                    75
 
 #endif
diff --git a/include/dt-bindings/clock/r8a77970-cpg-mssr.h b/include/dt-bindings/clock/r8a77970-cpg-mssr.h
new file mode 100644
index 0000000..4146395
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2017 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77970 CPG Core Clocks */
+#define R8A77970_CLK_Z2                        0
+#define R8A77970_CLK_ZR                        1
+#define R8A77970_CLK_ZTR               2
+#define R8A77970_CLK_ZTRD2             3
+#define R8A77970_CLK_ZT                        4
+#define R8A77970_CLK_ZX                        5
+#define R8A77970_CLK_S1D1              6
+#define R8A77970_CLK_S1D2              7
+#define R8A77970_CLK_S1D4              8
+#define R8A77970_CLK_S2D1              9
+#define R8A77970_CLK_S2D2              10
+#define R8A77970_CLK_S2D4              11
+#define R8A77970_CLK_LB                        12
+#define R8A77970_CLK_CL                        13
+#define R8A77970_CLK_ZB3               14
+#define R8A77970_CLK_ZB3D2             15
+#define R8A77970_CLK_DDR               16
+#define R8A77970_CLK_CR                        17
+#define R8A77970_CLK_CRD2              18
+#define R8A77970_CLK_SD0H              19
+#define R8A77970_CLK_SD0               20
+#define R8A77970_CLK_RPC               21
+#define R8A77970_CLK_RPCD2             22
+#define R8A77970_CLK_MSO               23
+#define R8A77970_CLK_CANFD             24
+#define R8A77970_CLK_CSI0              25
+#define R8A77970_CLK_FRAY              26
+#define R8A77970_CLK_CP                        27
+#define R8A77970_CLK_CPEX              28
+#define R8A77970_CLK_R                 29
+#define R8A77970_CLK_OSC               30
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ */
index 37e66b054d64a1bfb96a5fed7ebbb3aa55219db3..f3ba68a25ecb2a82dbb92169928165a11fb642e5 100644
@@ -26,6 +26,8 @@
 #define ARMCLK                 4
 #define HCLK                   5
 #define PCLK                   6
+#define MPLL                   7
+#define EPLL                   8
 
 /* Special clocks */
 #define SCLK_HSSPI0            16
index c5a53f38d654048e2c9c8bc2bd3f2f850316bb7c..e4fa61be5c759087c3629441258c38200f400286 100644
@@ -43,6 +43,8 @@
 #define _DT_BINDINGS_CLK_SUN4I_A10_H_
 
 #define CLK_HOSC               1
+#define CLK_PLL_VIDEO0_2X      9
+#define CLK_PLL_VIDEO1_2X      18
 #define CLK_CPU                        20
 
 /* AHB Gates */
index 4482530fb6f5e005ea2e2a2425a91bb61ed85f61..c5d13340184aada0817376a662f7ae247648aeb5 100644
 #ifndef _DT_BINDINGS_CLK_SUN6I_A31_H_
 #define _DT_BINDINGS_CLK_SUN6I_A31_H_
 
+#define CLK_PLL_VIDEO0_2X      7
+
 #define CLK_PLL_PERIPH         10
 
+#define CLK_PLL_VIDEO1_2X      13
+
 #define CLK_CPU                        18
 
 #define CLK_AHB1_MIPIDSI       23
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
deleted file mode 100644
index a75d304..0000000
+++ /dev/null
@@ -1,887 +0,0 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MSM_BUS_IDS_H
-#define __MSM_BUS_IDS_H
-
-/* Aggregation types */
-#define AGG_SCHEME_NONE        0
-#define AGG_SCHEME_LEG 1
-#define AGG_SCHEME_1   2
-
-/* Topology related enums */
-#define        MSM_BUS_FAB_DEFAULT 0
-#define        MSM_BUS_FAB_APPSS 0
-#define        MSM_BUS_FAB_SYSTEM 1024
-#define        MSM_BUS_FAB_MMSS 2048
-#define        MSM_BUS_FAB_SYSTEM_FPB 3072
-#define        MSM_BUS_FAB_CPSS_FPB 4096
-
-#define        MSM_BUS_FAB_BIMC 0
-#define        MSM_BUS_FAB_SYS_NOC 1024
-#define        MSM_BUS_FAB_MMSS_NOC 2048
-#define        MSM_BUS_FAB_OCMEM_NOC 3072
-#define        MSM_BUS_FAB_PERIPH_NOC 4096
-#define        MSM_BUS_FAB_CONFIG_NOC 5120
-#define        MSM_BUS_FAB_OCMEM_VNOC 6144
-#define        MSM_BUS_FAB_MMSS_AHB 2049
-#define        MSM_BUS_FAB_A0_NOC 6145
-#define        MSM_BUS_FAB_A1_NOC 6146
-#define        MSM_BUS_FAB_A2_NOC 6147
-#define        MSM_BUS_FAB_GNOC 6148
-#define        MSM_BUS_FAB_CR_VIRT 6149
-
-#define        MSM_BUS_MASTER_FIRST 1
-#define        MSM_BUS_MASTER_AMPSS_M0 1
-#define        MSM_BUS_MASTER_AMPSS_M1 2
-#define        MSM_BUS_APPSS_MASTER_FAB_MMSS 3
-#define        MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
-#define        MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
-#define        MSM_BUS_MASTER_SPS 6
-#define        MSM_BUS_MASTER_ADM_PORT0 7
-#define        MSM_BUS_MASTER_ADM_PORT1 8
-#define        MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
-#define        MSM_BUS_MASTER_ADM1_PORT1 10
-#define        MSM_BUS_MASTER_LPASS_PROC 11
-#define        MSM_BUS_MASTER_MSS_PROCI 12
-#define        MSM_BUS_MASTER_MSS_PROCD 13
-#define        MSM_BUS_MASTER_MSS_MDM_PORT0 14
-#define        MSM_BUS_MASTER_LPASS 15
-#define        MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
-#define        MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
-#define        MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
-#define        MSM_BUS_MASTER_ADM1_CI 19
-#define        MSM_BUS_MASTER_ADM0_CI 20
-#define        MSM_BUS_MASTER_MSS_MDM_PORT1 21
-#define        MSM_BUS_MASTER_MDP_PORT0 22
-#define        MSM_BUS_MASTER_MDP_PORT1 23
-#define        MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
-#define        MSM_BUS_MASTER_ROTATOR 25
-#define        MSM_BUS_MASTER_GRAPHICS_3D 26
-#define        MSM_BUS_MASTER_JPEG_DEC 27
-#define        MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
-#define        MSM_BUS_MASTER_VFE 29
-#define        MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
-#define        MSM_BUS_MASTER_VPE 30
-#define        MSM_BUS_MASTER_JPEG_ENC 31
-#define        MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
-#define        MSM_BUS_MMSS_MASTER_APPS_FAB 33
-#define        MSM_BUS_MASTER_HD_CODEC_PORT0 34
-#define        MSM_BUS_MASTER_HD_CODEC_PORT1 35
-#define        MSM_BUS_MASTER_SPDM 36
-#define        MSM_BUS_MASTER_RPM 37
-#define        MSM_BUS_MASTER_MSS 38
-#define        MSM_BUS_MASTER_RIVA 39
-#define        MSM_BUS_MASTER_SNOC_VMEM 40
-#define        MSM_BUS_MASTER_MSS_SW_PROC 41
-#define        MSM_BUS_MASTER_MSS_FW_PROC 42
-#define        MSM_BUS_MASTER_HMSS 43
-#define        MSM_BUS_MASTER_GSS_NAV 44
-#define        MSM_BUS_MASTER_PCIE 45
-#define        MSM_BUS_MASTER_SATA 46
-#define        MSM_BUS_MASTER_CRYPTO 47
-#define        MSM_BUS_MASTER_VIDEO_CAP 48
-#define        MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
-#define        MSM_BUS_MASTER_VIDEO_ENC 50
-#define        MSM_BUS_MASTER_VIDEO_DEC 51
-#define        MSM_BUS_MASTER_LPASS_AHB 52
-#define        MSM_BUS_MASTER_QDSS_BAM 53
-#define        MSM_BUS_MASTER_SNOC_CFG 54
-#define        MSM_BUS_MASTER_CRYPTO_CORE0 55
-#define        MSM_BUS_MASTER_CRYPTO_CORE1 56
-#define        MSM_BUS_MASTER_MSS_NAV 57
-#define        MSM_BUS_MASTER_OCMEM_DMA 58
-#define        MSM_BUS_MASTER_WCSS 59
-#define        MSM_BUS_MASTER_QDSS_ETR 60
-#define        MSM_BUS_MASTER_USB3 61
-#define        MSM_BUS_MASTER_JPEG 62
-#define        MSM_BUS_MASTER_VIDEO_P0 63
-#define        MSM_BUS_MASTER_VIDEO_P1 64
-#define        MSM_BUS_MASTER_MSS_PROC 65
-#define        MSM_BUS_MASTER_JPEG_OCMEM 66
-#define        MSM_BUS_MASTER_MDP_OCMEM 67
-#define        MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
-#define        MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
-#define        MSM_BUS_MASTER_VFE_OCMEM 70
-#define        MSM_BUS_MASTER_CNOC_ONOC_CFG 71
-#define        MSM_BUS_MASTER_RPM_INST 72
-#define        MSM_BUS_MASTER_RPM_DATA 73
-#define        MSM_BUS_MASTER_RPM_SYS 74
-#define        MSM_BUS_MASTER_DEHR 75
-#define        MSM_BUS_MASTER_QDSS_DAP 76
-#define        MSM_BUS_MASTER_TIC 77
-#define        MSM_BUS_MASTER_SDCC_1 78
-#define        MSM_BUS_MASTER_SDCC_3 79
-#define        MSM_BUS_MASTER_SDCC_4 80
-#define        MSM_BUS_MASTER_SDCC_2 81
-#define        MSM_BUS_MASTER_TSIF 82
-#define        MSM_BUS_MASTER_BAM_DMA 83
-#define        MSM_BUS_MASTER_BLSP_2 84
-#define        MSM_BUS_MASTER_USB_HSIC 85
-#define        MSM_BUS_MASTER_BLSP_1 86
-#define        MSM_BUS_MASTER_USB_HS 87
-#define        MSM_BUS_MASTER_PNOC_CFG 88
-#define        MSM_BUS_MASTER_V_OCMEM_GFX3D 89
-#define        MSM_BUS_MASTER_IPA 90
-#define        MSM_BUS_MASTER_QPIC 91
-#define        MSM_BUS_MASTER_MDPE 92
-#define        MSM_BUS_MASTER_USB_HS2 93
-#define        MSM_BUS_MASTER_VPU 94
-#define        MSM_BUS_MASTER_UFS 95
-#define        MSM_BUS_MASTER_BCAST 96
-#define        MSM_BUS_MASTER_CRYPTO_CORE2 97
-#define        MSM_BUS_MASTER_EMAC 98
-#define        MSM_BUS_MASTER_VPU_1 99
-#define        MSM_BUS_MASTER_PCIE_1 100
-#define        MSM_BUS_MASTER_USB3_1 101
-#define        MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
-#define        MSM_BUS_MASTER_CNOC_MNOC_CFG 103
-#define        MSM_BUS_MASTER_TCU_0 104
-#define        MSM_BUS_MASTER_TCU_1 105
-#define        MSM_BUS_MASTER_CPP 106
-#define        MSM_BUS_MASTER_AUDIO 107
-#define        MSM_BUS_MASTER_PCIE_2 108
-#define        MSM_BUS_MASTER_VFE1 109
-#define        MSM_BUS_MASTER_XM_USB_HS1 110
-#define        MSM_BUS_MASTER_PCNOC_BIMC_1 111
-#define        MSM_BUS_MASTER_BIMC_PCNOC   112
-#define        MSM_BUS_MASTER_XI_USB_HSIC  113
-#define        MSM_BUS_MASTER_SGMII        114
-#define        MSM_BUS_SPMI_FETCHER 115
-#define        MSM_BUS_MASTER_GNOC_BIMC 116
-#define        MSM_BUS_MASTER_CRVIRT_A2NOC 117
-#define        MSM_BUS_MASTER_CNOC_A2NOC 118
-#define        MSM_BUS_MASTER_WLAN 119
-#define        MSM_BUS_MASTER_MSS_CE 120
-#define        MSM_BUS_MASTER_CDSP_PROC 121
-#define        MSM_BUS_MASTER_GNOC_SNOC 122
-#define        MSM_BUS_MASTER_PIMEM 123
-#define        MSM_BUS_MASTER_MASTER_LAST 124
-
-#define        MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
-#define        MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
-
-#define        MSM_BUS_SNOC_MM_INT_0 10000
-#define        MSM_BUS_SNOC_MM_INT_1 10001
-#define        MSM_BUS_SNOC_MM_INT_2 10002
-#define        MSM_BUS_SNOC_MM_INT_BIMC 10003
-#define        MSM_BUS_SNOC_INT_0 10004
-#define        MSM_BUS_SNOC_INT_1 10005
-#define        MSM_BUS_SNOC_INT_BIMC 10006
-#define        MSM_BUS_SNOC_BIMC_0_MAS 10007
-#define        MSM_BUS_SNOC_BIMC_1_MAS 10008
-#define        MSM_BUS_SNOC_QDSS_INT 10009
-#define        MSM_BUS_PNOC_SNOC_MAS 10010
-#define        MSM_BUS_PNOC_SNOC_SLV 10011
-#define        MSM_BUS_PNOC_INT_0 10012
-#define        MSM_BUS_PNOC_INT_1 10013
-#define        MSM_BUS_PNOC_M_0 10014
-#define        MSM_BUS_PNOC_M_1 10015
-#define        MSM_BUS_BIMC_SNOC_MAS 10016
-#define        MSM_BUS_BIMC_SNOC_SLV 10017
-#define        MSM_BUS_PNOC_SLV_0 10018
-#define        MSM_BUS_PNOC_SLV_1 10019
-#define        MSM_BUS_PNOC_SLV_2 10020
-#define        MSM_BUS_PNOC_SLV_3 10021
-#define        MSM_BUS_PNOC_SLV_4 10022
-#define        MSM_BUS_PNOC_SLV_8 10023
-#define        MSM_BUS_PNOC_SLV_9 10024
-#define        MSM_BUS_SNOC_BIMC_0_SLV 10025
-#define        MSM_BUS_SNOC_BIMC_1_SLV 10026
-#define        MSM_BUS_MNOC_BIMC_MAS 10027
-#define        MSM_BUS_MNOC_BIMC_SLV 10028
-#define        MSM_BUS_BIMC_MNOC_MAS 10029
-#define        MSM_BUS_BIMC_MNOC_SLV 10030
-#define        MSM_BUS_SNOC_BIMC_MAS 10031
-#define        MSM_BUS_SNOC_BIMC_SLV 10032
-#define        MSM_BUS_CNOC_SNOC_MAS 10033
-#define        MSM_BUS_CNOC_SNOC_SLV 10034
-#define        MSM_BUS_SNOC_CNOC_MAS 10035
-#define        MSM_BUS_SNOC_CNOC_SLV 10036
-#define        MSM_BUS_OVNOC_SNOC_MAS 10037
-#define        MSM_BUS_OVNOC_SNOC_SLV 10038
-#define        MSM_BUS_SNOC_OVNOC_MAS 10039
-#define        MSM_BUS_SNOC_OVNOC_SLV 10040
-#define        MSM_BUS_SNOC_PNOC_MAS 10041
-#define        MSM_BUS_SNOC_PNOC_SLV 10042
-#define        MSM_BUS_BIMC_INT_APPS_EBI 10043
-#define        MSM_BUS_BIMC_INT_APPS_SNOC 10044
-#define        MSM_BUS_SNOC_BIMC_2_MAS 10045
-#define        MSM_BUS_SNOC_BIMC_2_SLV 10046
-#define        MSM_BUS_PNOC_SLV_5      10047
-#define        MSM_BUS_PNOC_SLV_7      10048
-#define        MSM_BUS_PNOC_INT_2 10049
-#define        MSM_BUS_PNOC_INT_3 10050
-#define        MSM_BUS_PNOC_INT_4 10051
-#define        MSM_BUS_PNOC_INT_5 10052
-#define        MSM_BUS_PNOC_INT_6 10053
-#define        MSM_BUS_PNOC_INT_7 10054
-#define        MSM_BUS_BIMC_SNOC_1_MAS 10055
-#define        MSM_BUS_BIMC_SNOC_1_SLV 10056
-#define        MSM_BUS_PNOC_A1NOC_MAS 10057
-#define        MSM_BUS_PNOC_A1NOC_SLV 10058
-#define        MSM_BUS_CNOC_A1NOC_MAS 10059
-#define        MSM_BUS_A0NOC_SNOC_MAS 10060
-#define        MSM_BUS_A0NOC_SNOC_SLV 10061
-#define        MSM_BUS_A1NOC_SNOC_SLV 10062
-#define        MSM_BUS_A1NOC_SNOC_MAS 10063
-#define        MSM_BUS_A2NOC_SNOC_MAS 10064
-#define        MSM_BUS_A2NOC_SNOC_SLV 10065
-#define        MSM_BUS_SNOC_INT_2 10066
-#define        MSM_BUS_A0NOC_QDSS_INT  10067
-#define        MSM_BUS_INT_LAST 10068
-
-#define        MSM_BUS_INT_TEST_ID     20000
-#define        MSM_BUS_INT_TEST_LAST   20050
-
-#define        MSM_BUS_SLAVE_FIRST 512
-#define        MSM_BUS_SLAVE_EBI_CH0 512
-#define        MSM_BUS_SLAVE_EBI_CH1 513
-#define        MSM_BUS_SLAVE_AMPSS_L2 514
-#define        MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
-#define        MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
-#define        MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
-#define        MSM_BUS_SLAVE_SPS 518
-#define        MSM_BUS_SLAVE_SYSTEM_IMEM 519
-#define        MSM_BUS_SLAVE_AMPSS 520
-#define        MSM_BUS_SLAVE_MSS 521
-#define        MSM_BUS_SLAVE_LPASS 522
-#define        MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
-#define        MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
-#define        MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
-#define        MSM_BUS_SLAVE_CORESIGHT 526
-#define        MSM_BUS_SLAVE_RIVA 527
-#define        MSM_BUS_SLAVE_SMI 528
-#define        MSM_BUS_MMSS_SLAVE_FAB_APPS 529
-#define        MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
-#define        MSM_BUS_SLAVE_MM_IMEM 531
-#define        MSM_BUS_SLAVE_CRYPTO 532
-#define        MSM_BUS_SLAVE_SPDM 533
-#define        MSM_BUS_SLAVE_RPM 534
-#define        MSM_BUS_SLAVE_RPM_MSG_RAM 535
-#define        MSM_BUS_SLAVE_MPM 536
-#define        MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
-#define        MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
-#define        MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
-#define        MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
-#define        MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
-#define        MSM_BUS_SLAVE_GSBI1_UART 542
-#define        MSM_BUS_SLAVE_GSBI2_UART 543
-#define        MSM_BUS_SLAVE_GSBI3_UART 544
-#define        MSM_BUS_SLAVE_GSBI4_UART 545
-#define        MSM_BUS_SLAVE_GSBI5_UART 546
-#define        MSM_BUS_SLAVE_GSBI6_UART 547
-#define        MSM_BUS_SLAVE_GSBI7_UART 548
-#define        MSM_BUS_SLAVE_GSBI8_UART 549
-#define        MSM_BUS_SLAVE_GSBI9_UART 550
-#define        MSM_BUS_SLAVE_GSBI10_UART 551
-#define        MSM_BUS_SLAVE_GSBI11_UART 552
-#define        MSM_BUS_SLAVE_GSBI12_UART 553
-#define        MSM_BUS_SLAVE_GSBI1_QUP 554
-#define        MSM_BUS_SLAVE_GSBI2_QUP 555
-#define        MSM_BUS_SLAVE_GSBI3_QUP 556
-#define        MSM_BUS_SLAVE_GSBI4_QUP 557
-#define        MSM_BUS_SLAVE_GSBI5_QUP 558
-#define        MSM_BUS_SLAVE_GSBI6_QUP 559
-#define        MSM_BUS_SLAVE_GSBI7_QUP 560
-#define        MSM_BUS_SLAVE_GSBI8_QUP 561
-#define        MSM_BUS_SLAVE_GSBI9_QUP 562
-#define        MSM_BUS_SLAVE_GSBI10_QUP 563
-#define        MSM_BUS_SLAVE_GSBI11_QUP 564
-#define        MSM_BUS_SLAVE_GSBI12_QUP 565
-#define        MSM_BUS_SLAVE_EBI2_NAND 566
-#define        MSM_BUS_SLAVE_EBI2_CS0 567
-#define        MSM_BUS_SLAVE_EBI2_CS1 568
-#define        MSM_BUS_SLAVE_EBI2_CS2 569
-#define        MSM_BUS_SLAVE_EBI2_CS3 570
-#define        MSM_BUS_SLAVE_EBI2_CS4 571
-#define        MSM_BUS_SLAVE_EBI2_CS5 572
-#define        MSM_BUS_SLAVE_USB_FS1 573
-#define        MSM_BUS_SLAVE_USB_FS2 574
-#define        MSM_BUS_SLAVE_TSIF 575
-#define        MSM_BUS_SLAVE_MSM_TSSC 576
-#define        MSM_BUS_SLAVE_MSM_PDM 577
-#define        MSM_BUS_SLAVE_MSM_DIMEM 578
-#define        MSM_BUS_SLAVE_MSM_TCSR 579
-#define        MSM_BUS_SLAVE_MSM_PRNG 580
-#define        MSM_BUS_SLAVE_GSS 581
-#define        MSM_BUS_SLAVE_SATA 582
-#define        MSM_BUS_SLAVE_USB3 583
-#define        MSM_BUS_SLAVE_WCSS 584
-#define        MSM_BUS_SLAVE_OCIMEM 585
-#define        MSM_BUS_SLAVE_SNOC_OCMEM 586
-#define        MSM_BUS_SLAVE_SERVICE_SNOC 587
-#define        MSM_BUS_SLAVE_QDSS_STM 588
-#define        MSM_BUS_SLAVE_CAMERA_CFG 589
-#define        MSM_BUS_SLAVE_DISPLAY_CFG 590
-#define        MSM_BUS_SLAVE_OCMEM_CFG 591
-#define        MSM_BUS_SLAVE_CPR_CFG 592
-#define        MSM_BUS_SLAVE_CPR_XPU_CFG 593
-#define        MSM_BUS_SLAVE_MISC_CFG 594
-#define        MSM_BUS_SLAVE_MISC_XPU_CFG 595
-#define        MSM_BUS_SLAVE_VENUS_CFG 596
-#define        MSM_BUS_SLAVE_MISC_VENUS_CFG 597
-#define        MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
-#define        MSM_BUS_SLAVE_MMSS_CLK_CFG 599
-#define        MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
-#define        MSM_BUS_SLAVE_MNOC_MPU_CFG 601
-#define        MSM_BUS_SLAVE_ONOC_MPU_CFG 602
-#define        MSM_BUS_SLAVE_SERVICE_MNOC 603
-#define        MSM_BUS_SLAVE_OCMEM 604
-#define        MSM_BUS_SLAVE_SERVICE_ONOC 605
-#define        MSM_BUS_SLAVE_SDCC_1 606
-#define        MSM_BUS_SLAVE_SDCC_3 607
-#define        MSM_BUS_SLAVE_SDCC_2 608
-#define        MSM_BUS_SLAVE_SDCC_4 609
-#define        MSM_BUS_SLAVE_BAM_DMA 610
-#define        MSM_BUS_SLAVE_BLSP_2 611
-#define        MSM_BUS_SLAVE_USB_HSIC 612
-#define        MSM_BUS_SLAVE_BLSP_1 613
-#define        MSM_BUS_SLAVE_USB_HS 614
-#define        MSM_BUS_SLAVE_PDM 615
-#define        MSM_BUS_SLAVE_PERIPH_APU_CFG 616
-#define        MSM_BUS_SLAVE_PNOC_MPU_CFG 617
-#define        MSM_BUS_SLAVE_PRNG 618
-#define        MSM_BUS_SLAVE_SERVICE_PNOC 619
-#define        MSM_BUS_SLAVE_CLK_CTL 620
-#define        MSM_BUS_SLAVE_CNOC_MSS 621
-#define        MSM_BUS_SLAVE_SECURITY 622
-#define        MSM_BUS_SLAVE_TCSR 623
-#define        MSM_BUS_SLAVE_TLMM 624
-#define        MSM_BUS_SLAVE_CRYPTO_0_CFG 625
-#define        MSM_BUS_SLAVE_CRYPTO_1_CFG 626
-#define        MSM_BUS_SLAVE_IMEM_CFG 627
-#define        MSM_BUS_SLAVE_MESSAGE_RAM 628
-#define        MSM_BUS_SLAVE_BIMC_CFG 629
-#define        MSM_BUS_SLAVE_BOOT_ROM 630
-#define        MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
-#define        MSM_BUS_SLAVE_PMIC_ARB 632
-#define        MSM_BUS_SLAVE_SPDM_WRAPPER 633
-#define        MSM_BUS_SLAVE_DEHR_CFG 634
-#define        MSM_BUS_SLAVE_QDSS_CFG 635
-#define        MSM_BUS_SLAVE_RBCPR_CFG 636
-#define        MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
-#define        MSM_BUS_SLAVE_SNOC_MPU_CFG 638
-#define        MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
-#define        MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
-#define        MSM_BUS_SLAVE_PNOC_CFG 641
-#define        MSM_BUS_SLAVE_SNOC_CFG 642
-#define        MSM_BUS_SLAVE_EBI1_DLL_CFG 643
-#define        MSM_BUS_SLAVE_PHY_APU_CFG 644
-#define        MSM_BUS_SLAVE_EBI1_PHY_CFG 645
-#define        MSM_BUS_SLAVE_SERVICE_CNOC 646
-#define        MSM_BUS_SLAVE_IPS_CFG 647
-#define        MSM_BUS_SLAVE_QPIC 648
-#define        MSM_BUS_SLAVE_DSI_CFG 649
-#define        MSM_BUS_SLAVE_UFS_CFG 650
-#define        MSM_BUS_SLAVE_RBCPR_CX_CFG 651
-#define        MSM_BUS_SLAVE_RBCPR_MX_CFG 652
-#define        MSM_BUS_SLAVE_PCIE_CFG 653
-#define        MSM_BUS_SLAVE_USB_PHYS_CFG 654
-#define        MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
-#define        MSM_BUS_SLAVE_AVSYNC_CFG 656
-#define        MSM_BUS_SLAVE_CRYPTO_2_CFG 657
-#define        MSM_BUS_SLAVE_VPU_CFG 658
-#define        MSM_BUS_SLAVE_BCAST_CFG 659
-#define        MSM_BUS_SLAVE_KLM_CFG 660
-#define        MSM_BUS_SLAVE_GENI_IR_CFG 661
-#define        MSM_BUS_SLAVE_OCMEM_GFX 662
-#define        MSM_BUS_SLAVE_CATS_128 663
-#define        MSM_BUS_SLAVE_OCMEM_64 664
-#define        MSM_BUS_SLAVE_PCIE_0 665
-#define        MSM_BUS_SLAVE_PCIE_1 666
-#define        MSM_BUS_SLAVE_PCIE_0_CFG 667
-#define        MSM_BUS_SLAVE_PCIE_1_CFG 668
-#define        MSM_BUS_SLAVE_SRVC_MNOC 669
-#define        MSM_BUS_SLAVE_USB_HS2 670
-#define        MSM_BUS_SLAVE_AUDIO 671
-#define        MSM_BUS_SLAVE_TCU 672
-#define        MSM_BUS_SLAVE_APPSS 673
-#define        MSM_BUS_SLAVE_PCIE_PARF 674
-#define        MSM_BUS_SLAVE_USB3_PHY_CFG 675
-#define        MSM_BUS_SLAVE_IPA_CFG 676
-#define        MSM_BUS_SLAVE_A0NOC_SNOC 677
-#define        MSM_BUS_SLAVE_A1NOC_SNOC 678
-#define        MSM_BUS_SLAVE_A2NOC_SNOC 679
-#define        MSM_BUS_SLAVE_HMSS_L3 680
-#define        MSM_BUS_SLAVE_PIMEM_CFG 681
-#define        MSM_BUS_SLAVE_DCC_CFG 682
-#define        MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
-#define        MSM_BUS_SLAVE_PCIE_2_CFG 684
-#define        MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
-#define        MSM_BUS_SLAVE_A0NOC_CFG 686
-#define        MSM_BUS_SLAVE_A1NOC_CFG 687
-#define        MSM_BUS_SLAVE_A2NOC_CFG 688
-#define        MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
-#define        MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
-#define        MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
-#define        MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
-#define        MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
-#define        MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
-#define        MSM_BUS_SLAVE_MMAGIC_CFG 695
-#define        MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
-#define        MSM_BUS_SLAVE_SSC_CFG 697
-#define        MSM_BUS_SLAVE_DSA_CFG 698
-#define        MSM_BUS_SLAVE_DSA_MPU_CFG 699
-#define        MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
-#define        MSM_BUS_SLAVE_SMMU_CPP_CFG 701
-#define        MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
-#define        MSM_BUS_SLAVE_SMMU_MDP_CFG 703
-#define        MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
-#define        MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
-#define        MSM_BUS_SLAVE_SMMU_VFE_CFG 706
-#define        MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
-#define        MSM_BUS_SLAVE_VMEM_CFG 708
-#define        MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
-#define        MSM_BUS_SLAVE_VMEM 710
-#define        MSM_BUS_SLAVE_AHB2PHY 711
-#define        MSM_BUS_SLAVE_PIMEM 712
-#define        MSM_BUS_SLAVE_SNOC_VMEM 713
-#define        MSM_BUS_SLAVE_PCIE_2 714
-#define        MSM_BUS_SLAVE_RBCPR_MX 715
-#define        MSM_BUS_SLAVE_RBCPR_CX 716
-#define        MSM_BUS_SLAVE_BIMC_PCNOC 717
-#define        MSM_BUS_SLAVE_PCNOC_BIMC_1 718
-#define        MSM_BUS_SLAVE_SGMII 719
-#define        MSM_BUS_SLAVE_SPMI_FETCHER 720
-#define        MSM_BUS_PNOC_SLV_6 721
-#define        MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
-#define        MSM_BUS_SLAVE_WLAN 723
-#define        MSM_BUS_SLAVE_CRVIRT_A2NOC 724
-#define        MSM_BUS_SLAVE_CNOC_A2NOC 725
-#define        MSM_BUS_SLAVE_GLM 726
-#define        MSM_BUS_SLAVE_GNOC_BIMC 727
-#define        MSM_BUS_SLAVE_GNOC_SNOC 728
-#define        MSM_BUS_SLAVE_QM_CFG 729
-#define        MSM_BUS_SLAVE_TLMM_EAST 730
-#define        MSM_BUS_SLAVE_TLMM_NORTH 731
-#define        MSM_BUS_SLAVE_TLMM_WEST 732
-#define        MSM_BUS_SLAVE_SKL 733
-#define        MSM_BUS_SLAVE_LPASS_TCM 734
-#define        MSM_BUS_SLAVE_TLMM_SOUTH 735
-#define        MSM_BUS_SLAVE_TLMM_CENTER 736
-#define        MSM_BUS_MSS_NAV_CE_MPU_CFG 737
-#define        MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
-#define        MSM_BUS_SLAVE_CDSP 739
-#define        MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
-#define        MSM_BUS_SLAVE_LPASS_MPU_CFG 741
-#define        MSM_BUS_SLAVE_CSI_PHY_CFG 742
-#define        MSM_BUS_SLAVE_LAST 743
-
-#define        MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
-#define        MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
-
-/*
- * ID's used in RPM messages
- */
-#define        ICBID_MASTER_APPSS_PROC 0
-#define        ICBID_MASTER_MSS_PROC 1
-#define        ICBID_MASTER_MNOC_BIMC 2
-#define        ICBID_MASTER_SNOC_BIMC 3
-#define        ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
-#define        ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
-#define        ICBID_MASTER_CNOC_MNOC_CFG 5
-#define        ICBID_MASTER_GFX3D 6
-#define        ICBID_MASTER_JPEG 7
-#define        ICBID_MASTER_MDP 8
-#define        ICBID_MASTER_MDP0 ICBID_MASTER_MDP
-#define        ICBID_MASTER_MDPS ICBID_MASTER_MDP
-#define        ICBID_MASTER_VIDEO 9
-#define        ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
-#define        ICBID_MASTER_VIDEO_P1 10
-#define        ICBID_MASTER_VFE 11
-#define        ICBID_MASTER_VFE0 ICBID_MASTER_VFE
-#define        ICBID_MASTER_CNOC_ONOC_CFG 12
-#define        ICBID_MASTER_JPEG_OCMEM 13
-#define        ICBID_MASTER_MDP_OCMEM 14
-#define        ICBID_MASTER_VIDEO_P0_OCMEM 15
-#define        ICBID_MASTER_VIDEO_P1_OCMEM 16
-#define        ICBID_MASTER_VFE_OCMEM 17
-#define        ICBID_MASTER_LPASS_AHB 18
-#define        ICBID_MASTER_QDSS_BAM 19
-#define        ICBID_MASTER_SNOC_CFG 20
-#define        ICBID_MASTER_BIMC_SNOC 21
-#define        ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
-#define        ICBID_MASTER_CNOC_SNOC 22
-#define        ICBID_MASTER_CRYPTO 23
-#define        ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
-#define        ICBID_MASTER_CRYPTO_CORE1 24
-#define        ICBID_MASTER_LPASS_PROC 25
-#define        ICBID_MASTER_MSS 26
-#define        ICBID_MASTER_MSS_NAV 27
-#define        ICBID_MASTER_OCMEM_DMA 28
-#define        ICBID_MASTER_PNOC_SNOC 29
-#define        ICBID_MASTER_WCSS 30
-#define        ICBID_MASTER_QDSS_ETR 31
-#define        ICBID_MASTER_USB3 32
-#define        ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
-#define        ICBID_MASTER_SDCC_1 33
-#define        ICBID_MASTER_SDCC_3 34
-#define        ICBID_MASTER_SDCC_2 35
-#define        ICBID_MASTER_SDCC_4 36
-#define        ICBID_MASTER_TSIF 37
-#define        ICBID_MASTER_BAM_DMA 38
-#define        ICBID_MASTER_BLSP_2 39
-#define        ICBID_MASTER_USB_HSIC 40
-#define        ICBID_MASTER_BLSP_1 41
-#define        ICBID_MASTER_USB_HS 42
-#define        ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
-#define        ICBID_MASTER_PNOC_CFG 43
-#define        ICBID_MASTER_SNOC_PNOC 44
-#define        ICBID_MASTER_RPM_INST 45
-#define        ICBID_MASTER_RPM_DATA 46
-#define        ICBID_MASTER_RPM_SYS 47
-#define        ICBID_MASTER_DEHR 48
-#define        ICBID_MASTER_QDSS_DAP 49
-#define        ICBID_MASTER_SPDM 50
-#define        ICBID_MASTER_TIC 51
-#define        ICBID_MASTER_SNOC_CNOC 52
-#define        ICBID_MASTER_GFX3D_OCMEM 53
-#define        ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
-#define        ICBID_MASTER_OVIRT_SNOC 54
-#define        ICBID_MASTER_SNOC_OVIRT 55
-#define        ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
-#define        ICBID_MASTER_ONOC_OVIRT 56
-#define        ICBID_MASTER_USB_HS2 57
-#define        ICBID_MASTER_QPIC 58
-#define        ICBID_MASTER_IPA 59
-#define        ICBID_MASTER_DSI 60
-#define        ICBID_MASTER_MDP1 61
-#define        ICBID_MASTER_MDPE ICBID_MASTER_MDP1
-#define        ICBID_MASTER_VPU_PROC 62
-#define        ICBID_MASTER_VPU 63
-#define        ICBID_MASTER_VPU0 ICBID_MASTER_VPU
-#define        ICBID_MASTER_CRYPTO_CORE2 64
-#define        ICBID_MASTER_PCIE_0 65
-#define        ICBID_MASTER_PCIE_1 66
-#define        ICBID_MASTER_SATA 67
-#define        ICBID_MASTER_UFS 68
-#define        ICBID_MASTER_USB3_1 69
-#define        ICBID_MASTER_VIDEO_OCMEM 70
-#define        ICBID_MASTER_VPU1 71
-#define        ICBID_MASTER_VCAP 72
-#define        ICBID_MASTER_EMAC 73
-#define        ICBID_MASTER_BCAST 74
-#define        ICBID_MASTER_MMSS_PROC 75
-#define        ICBID_MASTER_SNOC_BIMC_1 76
-#define        ICBID_MASTER_SNOC_PCNOC 77
-#define        ICBID_MASTER_AUDIO 78
-#define        ICBID_MASTER_MM_INT_0 79
-#define        ICBID_MASTER_MM_INT_1 80
-#define        ICBID_MASTER_MM_INT_2 81
-#define        ICBID_MASTER_MM_INT_BIMC 82
-#define        ICBID_MASTER_MSS_INT 83
-#define        ICBID_MASTER_PCNOC_CFG 84
-#define        ICBID_MASTER_PCNOC_INT_0 85
-#define        ICBID_MASTER_PCNOC_INT_1 86
-#define        ICBID_MASTER_PCNOC_M_0 87
-#define        ICBID_MASTER_PCNOC_M_1 88
-#define        ICBID_MASTER_PCNOC_S_0 89
-#define        ICBID_MASTER_PCNOC_S_1 90
-#define        ICBID_MASTER_PCNOC_S_2 91
-#define        ICBID_MASTER_PCNOC_S_3 92
-#define        ICBID_MASTER_PCNOC_S_4 93
-#define        ICBID_MASTER_PCNOC_S_6 94
-#define        ICBID_MASTER_PCNOC_S_7 95
-#define        ICBID_MASTER_PCNOC_S_8 96
-#define        ICBID_MASTER_PCNOC_S_9 97
-#define        ICBID_MASTER_QDSS_INT 98
-#define        ICBID_MASTER_SNOC_INT_0 99
-#define        ICBID_MASTER_SNOC_INT_1 100
-#define        ICBID_MASTER_SNOC_INT_BIMC 101
-#define        ICBID_MASTER_TCU_0 102
-#define        ICBID_MASTER_TCU_1 103
-#define        ICBID_MASTER_BIMC_INT_0 104
-#define        ICBID_MASTER_BIMC_INT_1 105
-#define        ICBID_MASTER_CAMERA 106
-#define        ICBID_MASTER_RICA 107
-#define        ICBID_MASTER_SNOC_BIMC_2 108
-#define        ICBID_MASTER_BIMC_SNOC_1 109
-#define        ICBID_MASTER_A0NOC_SNOC 110
-#define        ICBID_MASTER_A1NOC_SNOC 111
-#define        ICBID_MASTER_A2NOC_SNOC 112
-#define        ICBID_MASTER_PIMEM 113
-#define        ICBID_MASTER_SNOC_VMEM 114
-#define        ICBID_MASTER_CPP 115
-#define        ICBID_MASTER_CNOC_A1NOC 116
-#define        ICBID_MASTER_PNOC_A1NOC 117
-#define        ICBID_MASTER_HMSS 118
-#define        ICBID_MASTER_PCIE_2 119
-#define        ICBID_MASTER_ROTATOR 120
-#define        ICBID_MASTER_VENUS_VMEM 121
-#define        ICBID_MASTER_DCC 122
-#define        ICBID_MASTER_MCDMA 123
-#define        ICBID_MASTER_PCNOC_INT_2 124
-#define        ICBID_MASTER_PCNOC_INT_3 125
-#define        ICBID_MASTER_PCNOC_INT_4 126
-#define        ICBID_MASTER_PCNOC_INT_5 127
-#define        ICBID_MASTER_PCNOC_INT_6 128
-#define        ICBID_MASTER_PCNOC_S_5 129
-#define        ICBID_MASTER_SENSORS_AHB 130
-#define        ICBID_MASTER_SENSORS_PROC 131
-#define        ICBID_MASTER_QSPI 132
-#define        ICBID_MASTER_VFE1 133
-#define        ICBID_MASTER_SNOC_INT_2 134
-#define        ICBID_MASTER_SMMNOC_BIMC 135
-#define        ICBID_MASTER_CRVIRT_A1NOC 136
-#define        ICBID_MASTER_XM_USB_HS1 137
-#define        ICBID_MASTER_XI_USB_HS1 138
-#define        ICBID_MASTER_PCNOC_BIMC_1 139
-#define        ICBID_MASTER_BIMC_PCNOC 140
-#define        ICBID_MASTER_XI_HSIC 141
-#define        ICBID_MASTER_SGMII  142
-#define        ICBID_MASTER_SPMI_FETCHER 143
-#define        ICBID_MASTER_GNOC_BIMC 144
-#define        ICBID_MASTER_CRVIRT_A2NOC 145
-#define        ICBID_MASTER_CNOC_A2NOC 146
-#define        ICBID_MASTER_WLAN 147
-#define        ICBID_MASTER_MSS_CE 148
-#define        ICBID_MASTER_CDSP_PROC 149
-#define        ICBID_MASTER_GNOC_SNOC 150
-
-#define        ICBID_SLAVE_EBI1 0
-#define        ICBID_SLAVE_APPSS_L2 1
-#define        ICBID_SLAVE_BIMC_SNOC 2
-#define        ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
-#define        ICBID_SLAVE_CAMERA_CFG 3
-#define        ICBID_SLAVE_DISPLAY_CFG 4
-#define        ICBID_SLAVE_OCMEM_CFG 5
-#define        ICBID_SLAVE_CPR_CFG 6
-#define        ICBID_SLAVE_CPR_XPU_CFG 7
-#define        ICBID_SLAVE_MISC_CFG 8
-#define        ICBID_SLAVE_MISC_XPU_CFG 9
-#define        ICBID_SLAVE_VENUS_CFG 10
-#define        ICBID_SLAVE_GFX3D_CFG 11
-#define        ICBID_SLAVE_MMSS_CLK_CFG 12
-#define        ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
-#define        ICBID_SLAVE_MNOC_MPU_CFG 14
-#define        ICBID_SLAVE_ONOC_MPU_CFG 15
-#define        ICBID_SLAVE_MNOC_BIMC 16
-#define        ICBID_SLAVE_SERVICE_MNOC 17
-#define        ICBID_SLAVE_OCMEM 18
-#define        ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
-#define        ICBID_SLAVE_SERVICE_ONOC 19
-#define        ICBID_SLAVE_APPSS 20
-#define        ICBID_SLAVE_LPASS 21
-#define        ICBID_SLAVE_USB3 22
-#define        ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
-#define        ICBID_SLAVE_WCSS 23
-#define        ICBID_SLAVE_SNOC_BIMC 24
-#define        ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
-#define        ICBID_SLAVE_SNOC_CNOC 25
-#define        ICBID_SLAVE_IMEM 26
-#define        ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
-#define        ICBID_SLAVE_SNOC_OVIRT 27
-#define        ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
-#define        ICBID_SLAVE_SNOC_PNOC 28
-#define        ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
-#define        ICBID_SLAVE_SERVICE_SNOC 29
-#define        ICBID_SLAVE_QDSS_STM 30
-#define        ICBID_SLAVE_SDCC_1 31
-#define        ICBID_SLAVE_SDCC_3 32
-#define        ICBID_SLAVE_SDCC_2 33
-#define        ICBID_SLAVE_SDCC_4 34
-#define        ICBID_SLAVE_TSIF 35
-#define        ICBID_SLAVE_BAM_DMA 36
-#define        ICBID_SLAVE_BLSP_2 37
-#define        ICBID_SLAVE_USB_HSIC 38
-#define        ICBID_SLAVE_BLSP_1 39
-#define        ICBID_SLAVE_USB_HS 40
-#define        ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
-#define        ICBID_SLAVE_PDM 41
-#define        ICBID_SLAVE_PERIPH_APU_CFG 42
-#define        ICBID_SLAVE_PNOC_MPU_CFG 43
-#define        ICBID_SLAVE_PRNG 44
-#define        ICBID_SLAVE_PNOC_SNOC 45
-#define        ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
-#define        ICBID_SLAVE_SERVICE_PNOC 46
-#define        ICBID_SLAVE_CLK_CTL 47
-#define        ICBID_SLAVE_CNOC_MSS 48
-#define        ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
-#define        ICBID_SLAVE_SECURITY 49
-#define        ICBID_SLAVE_TCSR 50
-#define        ICBID_SLAVE_TLMM 51
-#define        ICBID_SLAVE_CRYPTO_0_CFG 52
-#define        ICBID_SLAVE_CRYPTO_1_CFG 53
-#define        ICBID_SLAVE_IMEM_CFG 54
-#define        ICBID_SLAVE_MESSAGE_RAM 55
-#define        ICBID_SLAVE_BIMC_CFG 56
-#define        ICBID_SLAVE_BOOT_ROM 57
-#define        ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
-#define        ICBID_SLAVE_PMIC_ARB 59
-#define        ICBID_SLAVE_SPDM_WRAPPER 60
-#define        ICBID_SLAVE_DEHR_CFG 61
-#define        ICBID_SLAVE_MPM 62
-#define        ICBID_SLAVE_QDSS_CFG 63
-#define        ICBID_SLAVE_RBCPR_CFG 64
-#define        ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
-#define        ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
-#define        ICBID_SLAVE_CNOC_MNOC_CFG 66
-#define        ICBID_SLAVE_SNOC_MPU_CFG 67
-#define        ICBID_SLAVE_CNOC_ONOC_CFG 68
-#define        ICBID_SLAVE_PNOC_CFG 69
-#define        ICBID_SLAVE_SNOC_CFG 70
-#define        ICBID_SLAVE_EBI1_DLL_CFG 71
-#define        ICBID_SLAVE_PHY_APU_CFG 72
-#define        ICBID_SLAVE_EBI1_PHY_CFG 73
-#define        ICBID_SLAVE_RPM 74
-#define        ICBID_SLAVE_CNOC_SNOC 75
-#define        ICBID_SLAVE_SERVICE_CNOC 76
-#define        ICBID_SLAVE_OVIRT_SNOC 77
-#define        ICBID_SLAVE_OVIRT_OCMEM 78
-#define        ICBID_SLAVE_USB_HS2 79
-#define        ICBID_SLAVE_QPIC 80
-#define        ICBID_SLAVE_IPS_CFG 81
-#define        ICBID_SLAVE_DSI_CFG 82
-#define        ICBID_SLAVE_USB3_1 83
-#define        ICBID_SLAVE_PCIE_0 84
-#define        ICBID_SLAVE_PCIE_1 85
-#define        ICBID_SLAVE_PSS_SMMU_CFG 86
-#define        ICBID_SLAVE_CRYPTO_2_CFG 87
-#define        ICBID_SLAVE_PCIE_0_CFG 88
-#define        ICBID_SLAVE_PCIE_1_CFG 89
-#define        ICBID_SLAVE_SATA_CFG 90
-#define        ICBID_SLAVE_SPSS_GENI_IR 91
-#define        ICBID_SLAVE_UFS_CFG 92
-#define        ICBID_SLAVE_AVSYNC_CFG 93
-#define        ICBID_SLAVE_VPU_CFG 94
-#define        ICBID_SLAVE_USB_PHY_CFG 95
-#define        ICBID_SLAVE_RBCPR_MX_CFG 96
-#define        ICBID_SLAVE_PCIE_PARF 97
-#define        ICBID_SLAVE_VCAP_CFG 98
-#define        ICBID_SLAVE_EMAC_CFG 99
-#define        ICBID_SLAVE_BCAST_CFG 100
-#define        ICBID_SLAVE_KLM_CFG 101
-#define        ICBID_SLAVE_DISPLAY_PWM 102
-#define        ICBID_SLAVE_GENI 103
-#define        ICBID_SLAVE_SNOC_BIMC_1 104
-#define        ICBID_SLAVE_AUDIO 105
-#define        ICBID_SLAVE_CATS_0 106
-#define        ICBID_SLAVE_CATS_1 107
-#define        ICBID_SLAVE_MM_INT_0 108
-#define        ICBID_SLAVE_MM_INT_1 109
-#define        ICBID_SLAVE_MM_INT_2 110
-#define        ICBID_SLAVE_MM_INT_BIMC 111
-#define        ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
-#define        ICBID_SLAVE_MSS_INT 113
-#define        ICBID_SLAVE_PCNOC_INT_0 114
-#define        ICBID_SLAVE_PCNOC_INT_1 115
-#define        ICBID_SLAVE_PCNOC_M_0 116
-#define        ICBID_SLAVE_PCNOC_M_1 117
-#define        ICBID_SLAVE_PCNOC_S_0 118
-#define        ICBID_SLAVE_PCNOC_S_1 119
-#define        ICBID_SLAVE_PCNOC_S_2 120
-#define        ICBID_SLAVE_PCNOC_S_3 121
-#define        ICBID_SLAVE_PCNOC_S_4 122
-#define        ICBID_SLAVE_PCNOC_S_6 123
-#define        ICBID_SLAVE_PCNOC_S_7 124
-#define        ICBID_SLAVE_PCNOC_S_8 125
-#define        ICBID_SLAVE_PCNOC_S_9 126
-#define        ICBID_SLAVE_PRNG_XPU_CFG 127
-#define        ICBID_SLAVE_QDSS_INT 128
-#define        ICBID_SLAVE_RPM_XPU_CFG 129
-#define        ICBID_SLAVE_SNOC_INT_0 130
-#define        ICBID_SLAVE_SNOC_INT_1 131
-#define        ICBID_SLAVE_SNOC_INT_BIMC 132
-#define        ICBID_SLAVE_TCU 133
-#define        ICBID_SLAVE_BIMC_INT_0 134
-#define        ICBID_SLAVE_BIMC_INT_1 135
-#define        ICBID_SLAVE_RICA_CFG 136
-#define        ICBID_SLAVE_SNOC_BIMC_2 137
-#define        ICBID_SLAVE_BIMC_SNOC_1 138
-#define        ICBID_SLAVE_PNOC_A1NOC 139
-#define        ICBID_SLAVE_SNOC_VMEM 140
-#define        ICBID_SLAVE_A0NOC_SNOC 141
-#define        ICBID_SLAVE_A1NOC_SNOC 142
-#define        ICBID_SLAVE_A2NOC_SNOC 143
-#define        ICBID_SLAVE_A0NOC_CFG 144
-#define        ICBID_SLAVE_A0NOC_MPU_CFG 145
-#define        ICBID_SLAVE_A0NOC_SMMU_CFG 146
-#define        ICBID_SLAVE_A1NOC_CFG 147
-#define        ICBID_SLAVE_A1NOC_MPU_CFG 148
-#define        ICBID_SLAVE_A1NOC_SMMU_CFG 149
-#define        ICBID_SLAVE_A2NOC_CFG 150
-#define        ICBID_SLAVE_A2NOC_MPU_CFG 151
-#define        ICBID_SLAVE_A2NOC_SMMU_CFG 152
-#define        ICBID_SLAVE_AHB2PHY 153
-#define        ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
-#define        ICBID_SLAVE_DCC_CFG 155
-#define        ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
-#define        ICBID_SLAVE_DSA_CFG 157
-#define        ICBID_SLAVE_DSA_MPU_CFG 158
-#define        ICBID_SLAVE_SSC_MPU_CFG 159
-#define        ICBID_SLAVE_HMSS_L3 160
-#define        ICBID_SLAVE_LPASS_SMMU_CFG 161
-#define        ICBID_SLAVE_MMAGIC_CFG 162
-#define        ICBID_SLAVE_PCIE20_AHB2PHY 163
-#define        ICBID_SLAVE_PCIE_2 164
-#define        ICBID_SLAVE_PCIE_2_CFG 165
-#define        ICBID_SLAVE_PIMEM 166
-#define        ICBID_SLAVE_PIMEM_CFG 167
-#define        ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
-#define        ICBID_SLAVE_RBCPR_CX 169
-#define        ICBID_SLAVE_RBCPR_MX 170
-#define        ICBID_SLAVE_SMMU_CPP_CFG 171
-#define        ICBID_SLAVE_SMMU_JPEG_CFG 172
-#define        ICBID_SLAVE_SMMU_MDP_CFG 173
-#define        ICBID_SLAVE_SMMU_ROTATOR_CFG 174
-#define        ICBID_SLAVE_SMMU_VENUS_CFG 175
-#define        ICBID_SLAVE_SMMU_VFE_CFG 176
-#define        ICBID_SLAVE_SSC_CFG 177
-#define        ICBID_SLAVE_VENUS_THROTTLE_CFG 178
-#define        ICBID_SLAVE_VMEM 179
-#define        ICBID_SLAVE_VMEM_CFG 180
-#define        ICBID_SLAVE_QDSS_MPU_CFG 181
-#define        ICBID_SLAVE_USB3_PHY_CFG 182
-#define        ICBID_SLAVE_IPA_CFG 183
-#define        ICBID_SLAVE_PCNOC_INT_2 184
-#define        ICBID_SLAVE_PCNOC_INT_3 185
-#define        ICBID_SLAVE_PCNOC_INT_4 186
-#define        ICBID_SLAVE_PCNOC_INT_5 187
-#define        ICBID_SLAVE_PCNOC_INT_6 188
-#define        ICBID_SLAVE_PCNOC_S_5 189
-#define        ICBID_SLAVE_QSPI 190
-#define        ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
-#define        ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
-#define        ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
-#define        ICBID_SLAVE_MSS_MPU_CFG 194
-#define        ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
-#define        ICBID_SLAVE_SKL 196
-#define        ICBID_SLAVE_SNOC_INT_2 197
-#define        ICBID_SLAVE_SMMNOC_BIMC 198
-#define        ICBID_SLAVE_CRVIRT_A1NOC 199
-#define        ICBID_SLAVE_SGMII        200
-#define        ICBID_SLAVE_QHS4_APPS    201
-#define        ICBID_SLAVE_BIMC_PCNOC   202
-#define        ICBID_SLAVE_PCNOC_BIMC_1 203
-#define        ICBID_SLAVE_SPMI_FETCHER 204
-#define        ICBID_SLAVE_MMSS_SMMU_CFG 205
-#define        ICBID_SLAVE_WLAN 206
-#define        ICBID_SLAVE_CRVIRT_A2NOC 207
-#define        ICBID_SLAVE_CNOC_A2NOC 208
-#define        ICBID_SLAVE_GLM 209
-#define        ICBID_SLAVE_GNOC_BIMC 210
-#define        ICBID_SLAVE_GNOC_SNOC 211
-#define        ICBID_SLAVE_QM_CFG 212
-#define        ICBID_SLAVE_TLMM_EAST 213
-#define        ICBID_SLAVE_TLMM_NORTH 214
-#define        ICBID_SLAVE_TLMM_WEST 215
-#define        ICBID_SLAVE_LPASS_TCM   216
-#define        ICBID_SLAVE_TLMM_SOUTH  217
-#define        ICBID_SLAVE_TLMM_CENTER 218
-#define        ICBID_SLAVE_MSS_NAV_CE_MPU_CFG  219
-#define        ICBID_SLAVE_A2NOC_THROTTLE_CFG  220
-#define        ICBID_SLAVE_CDSP        221
-#define        ICBID_SLAVE_CDSP_SMMU_CFG       222
-#define        ICBID_SLAVE_LPASS_MPU_CFG       223
-#define        ICBID_SLAVE_CSI_PHY_CFG 224
-#endif
index 34dba516ef24ba622c924d82be90d74f9b6d423e..8c896540a72cf4e933556627fa04bce0bf1d2ce3 100644
@@ -26,6 +26,8 @@
 #include <linux/list.h>
 #include <linux/jump_label.h>
 
+#include <linux/irqchip/arm-gic-v4.h>
+
 #define VGIC_V3_MAX_CPUS       255
 #define VGIC_V2_MAX_CPUS       8
 #define VGIC_NR_IRQS_LEGACY     256
@@ -73,6 +75,9 @@ struct vgic_global {
        /* Only needed for the legacy KVM_CREATE_IRQCHIP */
        bool                    can_emulate_gicv2;
 
+       /* Hardware has GICv4? */
+       bool                    has_gicv4;
+
        /* GIC system register CPU interface */
        struct static_key_false gicv3_cpuif;
 
@@ -116,6 +121,7 @@ struct vgic_irq {
        bool hw;                        /* Tied to HW IRQ */
        struct kref refcount;           /* Used for LPIs */
        u32 hwintid;                    /* HW INTID number */
+       unsigned int host_irq;          /* linux irq corresponding to hwintid */
        union {
                u8 targets;                     /* GICv2 target VCPUs mask */
                u32 mpidr;                      /* GICv3 target VCPU */
@@ -232,6 +238,15 @@ struct vgic_dist {
 
        /* used by vgic-debug */
        struct vgic_state_iter *iter;
+
+       /*
+        * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
+        * array, the property table pointer as well as allocation
+        * data. This essentially ties the Linux IRQ core and ITS
+        * together, and avoids leaking KVM's data structures anywhere
+        * else.
+        */
+       struct its_vm           its_vm;
 };
 
 struct vgic_v2_cpu_if {
@@ -250,6 +265,14 @@ struct vgic_v3_cpu_if {
        u32             vgic_ap0r[4];
        u32             vgic_ap1r[4];
        u64             vgic_lr[VGIC_V3_MAX_LRS];
+
+       /*
+        * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
+        * pending table pointer, the its_vm pointer and a few other
+        * HW specific things. As for the its_vm structure, this is
+        * linking the Linux IRQ subsystem and the ITS together.
+        */
+       struct its_vpe  its_vpe;
 };
 
 struct vgic_cpu {
@@ -307,9 +330,10 @@ void kvm_vgic_init_cpu_hardware(void);
 
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level, void *owner);
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+                         u32 vintid);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
@@ -349,4 +373,15 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
 
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
 
+struct kvm_kernel_irq_routing_entry;
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
+                              struct kvm_kernel_irq_routing_entry *irq_entry);
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
+                                struct kvm_kernel_irq_routing_entry *irq_entry);
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_ARM_VGIC_H */
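
The arm_vgic.h hunks above add the GICv4 plumbing: a has_gicv4 capability flag, the host_irq backing a mapped LPI, the per-VM its_vm and per-vCPU its_vpe containers, and the forwarding/doorbell entry points. A minimal sketch of how a hypervisor-side caller might use the new declarations; kvm, host_irq and irq_entry are assumed to come from the irqfd/irqbypass glue, and only the kvm_vgic_v4_* calls are from the header above.

/* Hedged sketch: forward a host MSI to a guest LPI and toggle the VPE
 * doorbell around vCPU blocking. */
static int example_start_forwarding(struct kvm *kvm, int host_irq,
				    struct kvm_kernel_irq_routing_entry *irq_entry)
{
	return kvm_vgic_v4_set_forwarding(kvm, host_irq, irq_entry);
}

static void example_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/* While the vCPU sleeps, the doorbell interrupt can wake it. */
	kvm_vgic_v4_enable_doorbell(vcpu);
}

static void example_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_vgic_v4_disable_doorbell(vcpu);
}
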
index f2deb71958b2dbee591c4c9bf95ee6f1e70f41c4..1030651f83098f9efda4859e01dd95be506e3071 100644 (file)
@@ -15,7 +15,7 @@
 #ifndef _LINUX_BITFIELD_H
 #define _LINUX_BITFIELD_H
 
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 
 /*
  * Bitfield access macros
index c397934f91dd78acb9b051d13f4ee4e5cbe76569..e55e4255a21082325f0888ffcd464615add708b8 100644 (file)
@@ -78,6 +78,7 @@ enum bpf_arg_type {
         * functions that access data on eBPF program stack
         */
        ARG_PTR_TO_MEM,         /* pointer to valid memory (stack, packet, map value) */
+       ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
        ARG_PTR_TO_UNINIT_MEM,  /* pointer to memory does not need to be initialized,
                                 * helper function must fill all bytes or clear
                                 * them in error case.
@@ -334,9 +335,8 @@ extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
 extern const struct bpf_verifier_ops xdp_analyzer_ops;
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
-struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
-                                      struct net_device *netdev);
+                                      bool attach_drv);
 struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
 void bpf_prog_sub(struct bpf_prog *prog, int i);
 struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
@@ -425,15 +425,9 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
        return ERR_PTR(-EOPNOTSUPP);
 }
 
-static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
-                                                enum bpf_prog_type type)
-{
-       return ERR_PTR(-EOPNOTSUPP);
-}
-
 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
                                                     enum bpf_prog_type type,
-                                                    struct net_device *netdev)
+                                                    bool attach_drv)
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
@@ -514,9 +508,14 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
 }
 #endif /* CONFIG_BPF_SYSCALL */
 
+static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+                                                enum bpf_prog_type type)
+{
+       return bpf_prog_get_type_dev(ufd, type, false);
+}
+
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
-u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
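
With this change bpf_prog_get_type() becomes a thin wrapper around bpf_prog_get_type_dev() with attach_drv set to false, so only callers that explicitly opt in can receive a device-bound (offloaded) program. A hedged sketch of the two lookup styles; ufd is assumed to be a program fd handed in from user space.

	struct bpf_prog *prog;

	/* Generic callers: a device-bound program is refused. */
	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SCHED_CLS);

	/* A driver attaching to its own netdev opts in to offloaded programs. */
	prog = bpf_prog_get_type_dev(ufd, BPF_PROG_TYPE_XDP, true /* attach_drv */);
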
index 07b96aaca2567df8dd3cd6064b861cbd8a961f0e..c561b986bab0ebf886000ea34e377ea789138a3b 100644 (file)
@@ -115,7 +115,7 @@ struct bpf_insn_aux_data {
                struct bpf_map *map_ptr;        /* pointer for call insn into lookup_elem */
        };
        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-       int converted_op_size; /* the valid value width after perceived conversion */
+       bool seen; /* this insn was processed by the verifier */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -171,7 +171,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
 #else
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
+static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
 {
        return -EOPNOTSUPP;
 }
index da4231c905c85a644c4d2223b5167bfe811ad36f..fe5916550da8c5c4da102dd91385eef8123167f6 100644 (file)
@@ -43,6 +43,8 @@ enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
 /* These are defined by the architecture */
 int is_valid_bugaddr(unsigned long addr);
 
+void generic_bug_clear_once(void);
+
 #else  /* !CONFIG_GENERIC_BUG */
 
 static inline enum bug_trap_type report_bug(unsigned long bug_addr,
@@ -51,6 +53,9 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
        return BUG_TRAP_TYPE_BUG;
 }
 
+
+static inline void generic_bug_clear_once(void) {}
+
 #endif /* CONFIG_GENERIC_BUG */
 
 /*
index 5100ec1b5d559f93b93a12b320feefe148f23b30..7c925e6211f12eb91b1236d091eed57180899138 100644 (file)
@@ -682,10 +682,10 @@ struct clk_gpio {
 
 extern const struct clk_ops clk_gpio_gate_ops;
 struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
-               const char *parent_name, unsigned gpio, bool active_low,
+               const char *parent_name, struct gpio_desc *gpiod,
                unsigned long flags);
 struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
-               const char *parent_name, unsigned gpio, bool active_low,
+               const char *parent_name, struct gpio_desc *gpiod,
                unsigned long flags);
 void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
 
@@ -701,11 +701,11 @@ void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
 
 extern const struct clk_ops clk_gpio_mux_ops;
 struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, unsigned gpio,
-               bool active_low, unsigned long flags);
+               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+               unsigned long flags);
 struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
-               const char * const *parent_names, u8 num_parents, unsigned gpio,
-               bool active_low, unsigned long flags);
+               const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+               unsigned long flags);
 void clk_hw_unregister_gpio_mux(struct clk_hw *hw);
 
 /**
@@ -815,7 +815,12 @@ int of_clk_add_hw_provider(struct device_node *np,
                           struct clk_hw *(*get)(struct of_phandle_args *clkspec,
                                                 void *data),
                           void *data);
+int devm_of_clk_add_hw_provider(struct device *dev,
+                          struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+                                                void *data),
+                          void *data);
 void of_clk_del_provider(struct device_node *np);
+void devm_of_clk_del_provider(struct device *dev);
 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
                                  void *data);
 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
@@ -847,7 +852,15 @@ static inline int of_clk_add_hw_provider(struct device_node *np,
 {
        return 0;
 }
+static inline int devm_of_clk_add_hw_provider(struct device *dev,
+                          struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+                                                void *data),
+                          void *data)
+{
+       return 0;
+}
 static inline void of_clk_del_provider(struct device_node *np) {}
+static inline void devm_of_clk_del_provider(struct device *dev) {}
 static inline struct clk *of_clk_src_simple_get(
        struct of_phandle_args *clkspec, void *data)
 {
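
devm_of_clk_add_hw_provider() mirrors of_clk_add_hw_provider() but unregisters the provider automatically when the supplying device is unbound, with devm_of_clk_del_provider() covering the manual case. A hedged sketch of a probe-time registration; example_build_hws() is illustrative, while of_clk_hw_onecell_get() and the devm helper come from this header.

/* Hedged sketch: managed clk provider registration in a driver's probe. */
static int example_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *onecell_data = example_build_hws(pdev);

	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
					   onecell_data);
}
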
index a06583e41f80520a1425e4231d4e201b8d0a12a7..3b609edffa8fb6527e963b8a629946366df96343 100644 (file)
@@ -16,3 +16,6 @@
  * with any version that can compile the kernel
  */
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#define randomized_struct_fields_start struct {
+#define randomized_struct_fields_end   };
index 3672353a0acda884be51fd3debba26ea50f43b09..188ed9f65517453d5bb97f2466167c069a7c0ae2 100644 (file)
@@ -88,17 +88,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 /* Unreachable code */
 #ifdef CONFIG_STACK_VALIDATION
+/*
+ * These macros help objtool understand GCC code flow for unreachable code.
+ * The __COUNTER__ based labels are a hack to make each instance of the macros
+ * unique, to convince GCC not to merge duplicate inline asm statements.
+ */
 #define annotate_reachable() ({                                                \
-       asm("%c0:\n\t"                                                  \
-           ".pushsection .discard.reachable\n\t"                       \
-           ".long %c0b - .\n\t"                                        \
-           ".popsection\n\t" : : "i" (__COUNTER__));                   \
+       asm volatile("%c0:\n\t"                                         \
+                    ".pushsection .discard.reachable\n\t"              \
+                    ".long %c0b - .\n\t"                               \
+                    ".popsection\n\t" : : "i" (__COUNTER__));          \
 })
 #define annotate_unreachable() ({                                      \
-       asm("%c0:\n\t"                                                  \
-           ".pushsection .discard.unreachable\n\t"                     \
-           ".long %c0b - .\n\t"                                        \
-           ".popsection\n\t" : : "i" (__COUNTER__));                   \
+       asm volatile("%c0:\n\t"                                         \
+                    ".pushsection .discard.unreachable\n\t"            \
+                    ".long %c0b - .\n\t"                               \
+                    ".popsection\n\t" : : "i" (__COUNTER__));          \
 })
 #define ASM_UNREACHABLE                                                        \
        "999:\n\t"                                                      \
index e9379e258d6464587bd25b31edaf62856c44b96d..511fbaabf6248b67220c16653e491f74e3f046e7 100644 (file)
@@ -971,8 +971,8 @@ struct lock_manager {
 struct net;
 void locks_start_grace(struct net *, struct lock_manager *);
 void locks_end_grace(struct lock_manager *);
-int locks_in_grace(struct net *);
-int opens_in_grace(struct net *);
+bool locks_in_grace(struct net *);
+bool opens_in_grace(struct net *);
 
 /* that will die - we need it for nfs_lock_info */
 #include <linux/nfs_fs_i.h>
@@ -1872,7 +1872,7 @@ struct super_operations {
  */
 #define __IS_FLG(inode, flg)   ((inode)->i_sb->s_flags & (flg))
 
-static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & MS_RDONLY; }
+static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
 #define IS_RDONLY(inode)       sb_rdonly((inode)->i_sb)
 #define IS_SYNC(inode)         (__IS_FLG(inode, SB_SYNCHRONOUS) || \
                                        ((inode)->i_flags & S_SYNC))
@@ -3088,7 +3088,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
 static inline int vfs_fstatat(int dfd, const char __user *filename,
                              struct kstat *stat, int flags)
 {
-       return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+       return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+                        stat, STATX_BASIC_STATS);
 }
 static inline int vfs_fstat(int fd, struct kstat *stat)
 {
@@ -3194,6 +3195,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
        return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
 }
 
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+       struct inode *inode;
+
+       if (!vma->vm_file)
+               return false;
+       if (!vma_is_dax(vma))
+               return false;
+       inode = file_inode(vma->vm_file);
+       if (inode->i_mode == S_IFCHR)
+               return false; /* device-dax */
+       return true;
+}
+
 static inline int iocb_flags(struct file *file)
 {
        int res = 0;
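
vma_is_fsdax() narrows vma_is_dax(): device-dax mappings (a character-device inode) still pass, while filesystem-DAX mappings are flagged, which is what the longterm get_user_pages variant later in this diff checks for. A minimal sketch of the distinction at a pinning site; the wrapper function is illustrative.

/* Hedged sketch: a long-lived pin is acceptable on device-dax but not on
 * fs-dax, where the filesystem may reallocate blocks under the page. */
static bool example_may_pin_longterm(struct vm_area_struct *vma)
{
	return !vma_is_fsdax(vma);
}
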
index e54d257983f28c4e395d9a7bf871652e7f89c3de..2bab81951ced732fb832f3b92cd2b71e840f9b78 100644 (file)
@@ -52,6 +52,30 @@ static inline void early_trace_init(void) { }
 struct module;
 struct ftrace_hash;
 
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
+       defined(CONFIG_DYNAMIC_FTRACE)
+const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+                  unsigned long *off, char **modname, char *sym);
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+                          char *type, char *name,
+                          char *module_name, int *exported);
+#else
+static inline const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+                  unsigned long *off, char **modname, char *sym)
+{
+       return NULL;
+}
+static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+                                        char *type, char *name,
+                                        char *module_name, int *exported)
+{
+       return -1;
+}
+#endif
+
+
 #ifdef CONFIG_FUNCTION_TRACER
 
 extern int ftrace_enabled;
@@ -79,10 +103,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
- * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
- *           could be controlled by following calls:
- *             ftrace_function_local_enable
- *             ftrace_function_local_disable
  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
  *            and passed to the callback. If this flag is set, but the
  *            architecture does not support passing regs
@@ -126,21 +146,20 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
        FTRACE_OPS_FL_DYNAMIC                   = 1 << 1,
-       FTRACE_OPS_FL_PER_CPU                   = 1 << 2,
-       FTRACE_OPS_FL_SAVE_REGS                 = 1 << 3,
-       FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 4,
-       FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 5,
-       FTRACE_OPS_FL_STUB                      = 1 << 6,
-       FTRACE_OPS_FL_INITIALIZED               = 1 << 7,
-       FTRACE_OPS_FL_DELETED                   = 1 << 8,
-       FTRACE_OPS_FL_ADDING                    = 1 << 9,
-       FTRACE_OPS_FL_REMOVING                  = 1 << 10,
-       FTRACE_OPS_FL_MODIFYING                 = 1 << 11,
-       FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 12,
-       FTRACE_OPS_FL_IPMODIFY                  = 1 << 13,
-       FTRACE_OPS_FL_PID                       = 1 << 14,
-       FTRACE_OPS_FL_RCU                       = 1 << 15,
-       FTRACE_OPS_FL_TRACE_ARRAY               = 1 << 16,
+       FTRACE_OPS_FL_SAVE_REGS                 = 1 << 2,
+       FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 3,
+       FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 4,
+       FTRACE_OPS_FL_STUB                      = 1 << 5,
+       FTRACE_OPS_FL_INITIALIZED               = 1 << 6,
+       FTRACE_OPS_FL_DELETED                   = 1 << 7,
+       FTRACE_OPS_FL_ADDING                    = 1 << 8,
+       FTRACE_OPS_FL_REMOVING                  = 1 << 9,
+       FTRACE_OPS_FL_MODIFYING                 = 1 << 10,
+       FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 11,
+       FTRACE_OPS_FL_IPMODIFY                  = 1 << 12,
+       FTRACE_OPS_FL_PID                       = 1 << 13,
+       FTRACE_OPS_FL_RCU                       = 1 << 14,
+       FTRACE_OPS_FL_TRACE_ARRAY               = 1 << 15,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -152,8 +171,10 @@ struct ftrace_ops_hash {
 };
 
 void ftrace_free_init_mem(void);
+void ftrace_free_mem(struct module *mod, void *start, void *end);
 #else
 static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 #endif
 
 /*
@@ -173,7 +194,6 @@ struct ftrace_ops {
        unsigned long                   flags;
        void                            *private;
        ftrace_func_t                   saved_func;
-       int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
@@ -205,55 +225,6 @@ int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
-/**
- * ftrace_function_local_enable - enable ftrace_ops on current cpu
- *
- * This function enables tracing on current cpu by decreasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
-{
-       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-               return;
-
-       (*this_cpu_ptr(ops->disabled))--;
-}
-
-/**
- * ftrace_function_local_disable - disable ftrace_ops on current cpu
- *
- * This function disables tracing on current cpu by increasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
-{
-       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-               return;
-
-       (*this_cpu_ptr(ops->disabled))++;
-}
-
-/**
- * ftrace_function_local_disabled - returns ftrace_ops disabled value
- *                                  on current cpu
- *
- * This function returns value of ftrace_ops::disabled on current cpu.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
-{
-       WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
-       return *this_cpu_ptr(ops->disabled);
-}
-
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);
 
@@ -271,6 +242,7 @@ static inline int ftrace_nr_registered_ops(void)
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -743,7 +715,8 @@ static inline unsigned long get_lock_parent_ip(void)
   static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+       (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
index 6dfec4d638df3e7b10811191aba871e421c8d724..872f930f1b06d4bbb21f1d371a1d63d306d695dd 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
+#include <linux/atomic.h>
 
 struct device;
 struct device_node;
@@ -71,7 +72,7 @@ struct gen_pool {
  */
 struct gen_pool_chunk {
        struct list_head next_chunk;    /* next chunk in pool */
-       atomic_t avail;
+       atomic_long_t avail;
        phys_addr_t phys_addr;          /* physical starting address of memory chunk */
        unsigned long start_addr;       /* start address of memory chunk */
        unsigned long end_addr;         /* end address of memory chunk (inclusive) */
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
deleted file mode 100644 (file)
index 127c39d..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_HTIRQ_H
-#define LINUX_HTIRQ_H
-
-struct pci_dev;
-struct irq_data;
-
-struct ht_irq_msg {
-       u32     address_lo;     /* low 32 bits of the ht irq message */
-       u32     address_hi;     /* high 32 bits of the it irq message */
-};
-
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
-                              struct ht_irq_msg *msg);
-
-struct ht_irq_cfg {
-       struct pci_dev *dev;
-        /* Update callback used to cope with buggy hardware */
-       ht_irq_update_t *update;
-       unsigned pos;
-       unsigned idx;
-       struct ht_irq_msg msg;
-};
-
-/* Helper functions.. */
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(struct irq_data *data);
-void unmask_ht_irq(struct irq_data *data);
-
-/* The arch hook for getting things started */
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
-                     ht_irq_update_t *update);
-void arch_teardown_ht_irq(unsigned int irq);
-
-/* For drivers of buggy hardware */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
-
-#endif /* LINUX_HTIRQ_H */
index fbf5b31d47eea91925b9275b9f7fa2784cd5fe56..82a25880714ac69860322edc3e69b4a81b83fb62 100644 (file)
@@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)
 }
 #endif
 
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
-       BUG();
-       return 0;
-}
-#endif
-
 #define HUGETLB_ANON_FILE "anon_hugepage"
 
 enum {
index f38b993edacb7c30fd40f3eb9623373aab529397..ea1b31101d9e32fad9a134d6f1444d6f68bd6b73 100644 (file)
@@ -40,7 +40,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init         __section(.init.text) __cold __inittrace __latent_entropy
+#define __init         __section(.init.text) __cold  __latent_entropy
 #define __initdata     __section(.init.data)
 #define __initconst    __section(.init.rodata)
 #define __exitdata     __section(.exit.data)
 
 #ifdef MODULE
 #define __exitused
-#define __inittrace notrace
 #else
 #define __exitused  __used
-#define __inittrace
 #endif
 
 #define __exit          __section(.exit.text) __exitused __cold notrace
index 8062e6cc607c82074d2e3513cd2db30f07333038..6a532629c98350fcf8c3d6b4d4ed99a8c58e5cc9 100644 (file)
@@ -105,7 +105,6 @@ extern struct group_info init_groups;
        .numbers        = { {                                           \
                .nr             = 0,                                    \
                .ns             = &init_pid_ns,                         \
-               .pid_chain      = { .next = NULL, .pprev = NULL },      \
        }, }                                                            \
 }
 
index d29e1e21bf3f80bdbade2da428a5e5366a549d9f..b1d861caca161a9d3afbb14b4b64f87ddc19e4ba 100644 (file)
  */
 #define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us)  \
 ({ \
-       ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
-       might_sleep_if(sleep_us); \
+       u64 __timeout_us = (timeout_us); \
+       unsigned long __sleep_us = (sleep_us); \
+       ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+       might_sleep_if((__sleep_us) != 0); \
        for (;;) { \
                (val) = op(addr); \
                if (cond) \
                        break; \
-               if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+               if (__timeout_us && \
+                   ktime_compare(ktime_get(), __timeout) > 0) { \
                        (val) = op(addr); \
                        break; \
                } \
-               if (sleep_us) \
-                       usleep_range((sleep_us >> 2) + 1, sleep_us); \
+               if (__sleep_us) \
+                       usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
        } \
        (cond) ? 0 : -ETIMEDOUT; \
 })
  */
 #define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
 ({ \
-       ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+       u64 __timeout_us = (timeout_us); \
+       unsigned long __delay_us = (delay_us); \
+       ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
        for (;;) { \
                (val) = op(addr); \
                if (cond) \
                        break; \
-               if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+               if (__timeout_us && \
+                   ktime_compare(ktime_get(), __timeout) > 0) { \
                        (val) = op(addr); \
                        break; \
                } \
-               if (delay_us) \
-                       udelay(delay_us);       \
+               if (__delay_us) \
+                       udelay(__delay_us);     \
        } \
        (cond) ? 0 : -ETIMEDOUT; \
 })
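
The iopoll.h hunks snapshot sleep_us and timeout_us into local __sleep_us/__timeout_us variables, so each argument is evaluated exactly once and the timeout is carried as a u64. Usage is unchanged; a minimal sketch polling a memory-mapped status register, where base, dev, STATUS_REG and STATUS_READY are assumptions.

	u32 val;
	int ret;

	/* Wait up to 100us for the (hypothetical) READY bit, sleeping
	 * roughly 10us between reads. */
	ret = readx_poll_timeout(readl, base + STATUS_REG, val,
				 val & STATUS_READY, 10, 100);
	if (ret == -ETIMEDOUT)
		dev_err(dev, "device did not become ready\n");
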
index 474812abe7731af01d4b3fd7f868ba02fc7bbeb0..b5630c8eb2f3a910b922eeb08153635e88faadbb 100644 (file)
@@ -19,7 +19,10 @@ struct ipc_ids {
        bool tables_initialized;
        struct rw_semaphore rwsem;
        struct idr ipcs_idr;
+       int max_id;
+#ifdef CONFIG_CHECKPOINT_RESTORE
        int next_id;
+#endif
        struct rhashtable key_ht;
 };
 
index b01d06db9101ae73b08952ba6e11abf86c1bbb6d..e140f69163b693b386bdc709719b4efc3d8a30b0 100644 (file)
@@ -211,6 +211,7 @@ struct irq_data {
  * IRQD_MANAGED_SHUTDOWN       - Interrupt was shutdown due to empty affinity
  *                               mask. Applies only to affinity managed irqs.
  * IRQD_SINGLE_TARGET          - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET    - Expected trigger already been set
  */
 enum {
        IRQD_TRIGGER_MASK               = 0xf,
@@ -231,6 +232,7 @@ enum {
        IRQD_IRQ_STARTED                = (1 << 22),
        IRQD_MANAGED_SHUTDOWN           = (1 << 23),
        IRQD_SINGLE_TARGET              = (1 << 24),
+       IRQD_DEFAULT_TRIGGER_SET        = (1 << 25),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -260,18 +262,25 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d)
        __irqd_to_state(d) |= IRQD_AFFINITY_SET;
 }
 
+static inline bool irqd_trigger_type_was_set(struct irq_data *d)
+{
+       return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
+}
+
 static inline u32 irqd_get_trigger_type(struct irq_data *d)
 {
        return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
 }
 
 /*
- * Must only be called inside irq_chip.irq_set_type() functions.
+ * Must only be called inside irq_chip.irq_set_type() functions or
+ * from the DT/ACPI setup code.
  */
 static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
 {
        __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
        __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
+       __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
 }
 
 static inline bool irqd_is_level_type(struct irq_data *d)
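
IRQD_DEFAULT_TRIGGER_SET is recorded by irqd_set_trigger_type(), and irqd_trigger_type_was_set() lets DT/ACPI setup code seed a default trigger without clobbering one that was already established. A hedged sketch of such setup code; the irqd_* helpers, irq_get_irq_data() and IRQ_TYPE_LEVEL_HIGH are existing symbols, the wrapper function is illustrative.

static void example_seed_default_trigger(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	/* Only apply the firmware default if nothing set a trigger yet. */
	if (d && !irqd_trigger_type_was_set(d))
		irqd_set_trigger_type(d, IRQ_TYPE_LEVEL_HIGH);
}
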
index 447da8ca2156221749876d53c84430c62e729980..fa683ea5c7692ef4ef0c743b619a9ee61ebd9317 100644 (file)
@@ -109,6 +109,7 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map);
 int its_unmap_vlpi(int irq);
 int its_prop_update_vlpi(int irq, u8 config, bool inv);
 
+struct irq_domain_ops;
 int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
 
 #endif
index 708f337d780be3ee4c628fe0422e3a6752d0fb33..bd118a6c60cbf8c5dd28478239e44550d899b57c 100644 (file)
 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
                         2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
 
-#ifndef CONFIG_64BIT
-# define KALLSYM_FMT "%08lx"
-#else
-# define KALLSYM_FMT "%016lx"
-#endif
-
 struct module;
 
 #ifdef CONFIG_KALLSYMS
index f5d8ce4f4f8667d8fd1002f058c8b56f250d480a..3ecf6f5e3a5f01eb81a67d7456868d43e1fd64ce 100644 (file)
@@ -8,19 +8,23 @@ struct task_struct;
 
 #ifdef CONFIG_KCOV
 
-void kcov_task_init(struct task_struct *t);
-void kcov_task_exit(struct task_struct *t);
-
 enum kcov_mode {
        /* Coverage collection is not enabled yet. */
        KCOV_MODE_DISABLED = 0,
+       /* KCOV was initialized, but tracing mode hasn't been chosen yet. */
+       KCOV_MODE_INIT = 1,
        /*
         * Tracing coverage collection mode.
         * Covered PCs are collected in a per-task buffer.
         */
-       KCOV_MODE_TRACE = 1,
+       KCOV_MODE_TRACE_PC = 2,
+       /* Collecting comparison operands mode. */
+       KCOV_MODE_TRACE_CMP = 3,
 };
 
+void kcov_task_init(struct task_struct *t);
+void kcov_task_exit(struct task_struct *t);
+
 #else
 
 static inline void kcov_task_init(struct task_struct *t) {}
index 4b484ab9e1635e6b412038fa2204d3f297a7c97d..ce51455e2adf631229d21b43e6afb76b2496790c 100644 (file)
@@ -549,7 +549,8 @@ extern enum system_states {
 #define TAINT_UNSIGNED_MODULE          13
 #define TAINT_SOFTLOCKUP               14
 #define TAINT_LIVEPATCH                        15
-#define TAINT_FLAGS_COUNT              16
+#define TAINT_AUX                      16
+#define TAINT_FLAGS_COUNT              17
 
 struct taint_flag {
        char c_true;    /* character printed when tainted */
index 9520fc3c3b9ab376ae571cb9895d0dc838212ffc..05d8fb5a06c491076889f57872a1388690e40438 100644 (file)
@@ -44,7 +44,7 @@ struct key_preparsed_payload {
        const void      *data;          /* Raw data */
        size_t          datalen;        /* Raw datalen */
        size_t          quotalen;       /* Quota length for proposed payload */
-       time_t          expiry;         /* Expiry time of key */
+       time64_t        expiry;         /* Expiry time of key */
 } __randomize_layout;
 
 typedef int (*request_key_actor_t)(struct key_construction *key,
index 8a15cabe928d0ee282742f0c44be26e7af01851d..e58ee10f6e585f59be794f6c27be1d8d148abac0 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/atomic.h>
 #include <linux/assoc_array.h>
 #include <linux/refcount.h>
+#include <linux/time64.h>
 
 #ifdef __KERNEL__
 #include <linux/uidgid.h>
@@ -162,10 +163,10 @@ struct key {
        struct key_user         *user;          /* owner of this key */
        void                    *security;      /* security data for this key */
        union {
-               time_t          expiry;         /* time at which key expires (or 0) */
-               time_t          revoked_at;     /* time at which key was revoked */
+               time64_t        expiry;         /* time at which key expires (or 0) */
+               time64_t        revoked_at;     /* time at which key was revoked */
        };
-       time_t                  last_used_at;   /* last time used for LRU keyring discard */
+       time64_t                last_used_at;   /* last time used for LRU keyring discard */
        kuid_t                  uid;
        kgid_t                  gid;
        key_perm_t              perm;           /* access permissions */
index 3203e36b2ee81f746b6d87c16701cdc567274ebd..c1961761311dbfd5968d6ed64ea91ca3c7d25b0e 100644 (file)
@@ -118,8 +118,7 @@ struct kthread_delayed_work {
 
 #define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {                         \
        .work = KTHREAD_WORK_INIT((dwork).work, (fn)),                  \
-       .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\
-                                    (TIMER_DATA_TYPE)&(dwork.timer),   \
+       .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\
                                     TIMER_IRQSAFE),                    \
        }
 
@@ -165,10 +164,9 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 #define kthread_init_delayed_work(dwork, fn)                           \
        do {                                                            \
                kthread_init_work(&(dwork)->work, (fn));                \
-               __setup_timer(&(dwork)->timer,                          \
-                             (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\
-                             (TIMER_DATA_TYPE)&(dwork)->timer,         \
-                             TIMER_IRQSAFE);                           \
+               __init_timer(&(dwork)->timer,                           \
+                            kthread_delayed_work_timer_fn,             \
+                            TIMER_IRQSAFE);                            \
        } while (0)
 
 int kthread_worker_fn(void *worker_ptr);
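
The kthread.h hunks drop the TIMER_FUNC_TYPE/TIMER_DATA_TYPE casts and the timer-as-data cookie now that timer callbacks receive the struct timer_list pointer directly; callers of the macros are unaffected. A hedged sketch of normal usage; example_fn, example_dwork and the worker are assumptions.

static void example_fn(struct kthread_work *work)
{
	/* deferred work runs in the kthread worker's context */
}

static struct kthread_delayed_work example_dwork;

static void example_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_fn);
	kthread_queue_delayed_work(worker, &example_dwork,
				   msecs_to_jiffies(10));
}
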
index 2e754b7c282c8324778b60e7ea57940d9f72c22d..893d6d606cd0a9023e2ab8ef27c523ba8a959a06 100644 (file)
@@ -715,6 +715,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu);
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
index 895ec0c4942e68c43ca49f1fb77f5112344ca05a..a2246cf670badb96e6c11c4d13b233db0c93388f 100644 (file)
@@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,
        new_page = __alloc_pages_nodemask(gfp_mask, order,
                                preferred_nid, nodemask);
 
-       if (new_page && PageTransHuge(page))
+       if (new_page && PageTransHuge(new_page))
                prep_transhuge_page(new_page);
 
        return new_page;
index 4de703d9e21f0de963752a83efa2a29dc6b00b24..3247a3dc7934893b586547a3dd830d2e63b43589 100644 (file)
@@ -36,6 +36,7 @@
 #define HWRNG_MINOR            183
 #define MICROCODE_MINOR                184
 #define IRNET_MINOR            187
+#define D7S_MINOR              193
 #define VFIO_MINOR             196
 #define TUN_MINOR              200
 #define CUSE_MINOR             203
index ee073146aaa7c0085d4e212726be5d60ee317e5a..ea818ff739cdfbb433fc10634ed5ac77eacbc5b7 100644 (file)
@@ -377,6 +377,7 @@ enum page_entry_size {
 struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
+       int (*split)(struct vm_area_struct * area, unsigned long addr);
        int (*mremap)(struct vm_area_struct * area);
        int (*fault)(struct vm_fault *vmf);
        int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
@@ -1379,6 +1380,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
                    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                    struct page **pages, unsigned int gup_flags);
+#ifdef CONFIG_FS_DAX
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+                           unsigned int gup_flags, struct page **pages,
+                           struct vm_area_struct **vmas);
+#else
+static inline long get_user_pages_longterm(unsigned long start,
+               unsigned long nr_pages, unsigned int gup_flags,
+               struct page **pages, struct vm_area_struct **vmas)
+{
+       return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+}
+#endif /* CONFIG_FS_DAX */
+
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
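
get_user_pages_longterm() only has a distinct implementation under CONFIG_FS_DAX; otherwise it falls straight through to get_user_pages(). It is intended for pins that outlive the call (RDMA-style memory registrations), where filesystem-DAX pages must be refused. A hedged sketch; start, npages, pages and vmas are assumptions.

	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages_longterm(start, npages, FOLL_WRITE,
					 pages, vmas);
	up_read(&current->mm->mmap_sem);
	if (pinned < 0)
		return pinned;	/* may fail, e.g. for fs-dax backed VMAs */
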
 
index 6cd0f6b7658b38c73be72e3a085d93a6362d2588..cd55bf14ad5141c1082753e9780417a0d5555501 100644 (file)
@@ -267,7 +267,7 @@ struct mtd_info {
         */
        unsigned int bitflip_threshold;
 
-       // Kernel-only stuff starts here.
+       /* Kernel-only stuff starts here. */
        const char *name;
        int index;
 
@@ -297,10 +297,6 @@ struct mtd_info {
        int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
                       size_t *retlen, void **virt, resource_size_t *phys);
        int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
-       unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
-                                            unsigned long len,
-                                            unsigned long offset,
-                                            unsigned long flags);
        int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
                      size_t *retlen, u_char *buf);
        int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
index fdef72d6e19891081af777b8fc5e75e19a674a57..7ab51bc4a380e281b6d27ed875ab537bd627f423 100644 (file)
@@ -5,11 +5,6 @@
 #include <linux/mtd/rawnand.h>
 
 struct gpio_nand_platdata {
-       int     gpio_nce;
-       int     gpio_nwp;
-       int     gpio_cle;
-       int     gpio_ale;
-       int     gpio_rdy;
        void    (*adjust_parts)(struct gpio_nand_platdata *, size_t);
        struct mtd_partition *parts;
        unsigned int num_parts;
index 2b05f4273babda7bde2247c07bf90f9363a73e7e..749bb08c47728bb2fd0091188e1b08479013a2e5 100644 (file)
@@ -177,6 +177,9 @@ enum nand_ecc_algo {
  */
 #define NAND_NEED_SCRAMBLING   0x00002000
 
+/* Device needs 3rd row address cycle */
+#define NAND_ROW_ADDR_3                0x00004000
+
 /* Options valid for Samsung large page devices */
 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
 
index 1f0a7fc7772feba68fbc99dc3fd25ea5df9028da..d0c66a0975cfa442a97c64d96323206d03aa605d 100644 (file)
@@ -231,11 +231,18 @@ enum spi_nor_option_flags {
        SNOR_F_USE_CLSR         = BIT(5),
 };
 
+/**
+ * struct flash_info - Forward declaration of a structure used internally by
+ *                    spi_nor_scan()
+ */
+struct flash_info;
+
 /**
  * struct spi_nor - Structure for defining a the SPI NOR layer
  * @mtd:               point to a mtd_info structure
  * @lock:              the lock for the read/write/erase/lock/unlock operations
  * @dev:               point to a spi device, or a spi nor controller device.
+ * @info:              spi-nor part JDEC MFR id and other info
  * @page_size:         the page size of the SPI NOR
  * @addr_width:                number of address bytes
  * @erase_opcode:      the opcode for erasing a sector
@@ -262,6 +269,7 @@ enum spi_nor_option_flags {
  * @flash_lock:                [FLASH-SPECIFIC] lock a region of the SPI NOR
  * @flash_unlock:      [FLASH-SPECIFIC] unlock a region of the SPI NOR
  * @flash_is_locked:   [FLASH-SPECIFIC] check if a region of the SPI NOR is
+ * @quad_enable:       [FLASH-SPECIFIC] enables SPI NOR quad mode
  *                     completely locked
  * @priv:              the private data
  */
@@ -269,6 +277,7 @@ struct spi_nor {
        struct mtd_info         mtd;
        struct mutex            lock;
        struct device           *dev;
+       const struct flash_info *info;
        u32                     page_size;
        u8                      addr_width;
        u8                      erase_opcode;
@@ -296,6 +305,7 @@ struct spi_nor {
        int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
        int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
        int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+       int (*quad_enable)(struct spi_nor *nor);
 
        void *priv;
 };
index dc8b4896b77b090e8329bdee9766033a6a3b95fb..b1b0ca7ccb2bacac5d997f97f86848e928bc9da7 100644 (file)
@@ -54,8 +54,9 @@ enum {
        NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
        NETIF_F_GSO_SCTP_BIT,           /* ... SCTP fragmentation */
        NETIF_F_GSO_ESP_BIT,            /* ... ESP with TSO */
+       NETIF_F_GSO_UDP_BIT,            /* ... UFO, deprecated except tuntap */
        /**/NETIF_F_GSO_LAST =          /* last bit, see GSO_MASK */
-               NETIF_F_GSO_ESP_BIT,
+               NETIF_F_GSO_UDP_BIT,
 
        NETIF_F_FCOE_CRC_BIT,           /* FCoE CRC32 */
        NETIF_F_SCTP_CRC_BIT,           /* SCTP checksum offload */
@@ -132,6 +133,7 @@ enum {
 #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_GSO_SCTP       __NETIF_F(GSO_SCTP)
 #define NETIF_F_GSO_ESP                __NETIF_F(GSO_ESP)
+#define NETIF_F_GSO_UDP                __NETIF_F(GSO_UDP)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX        __NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX        __NETIF_F(HW_VLAN_STAG_TX)
index 6b274bfe489f61332ebd503fcd28dedf0f79b42e..ef789e1d679efd349ed0b20c315defac1f1fb27c 100644 (file)
@@ -4140,6 +4140,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
        BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
 
        return (features & feature) == feature;
 }
index 15cab3967d6dfe435be2bbbe63c83cf06ee7042a..1fbde8a880d9a08b43cf57373f134b306c136dd3 100644 (file)
@@ -104,9 +104,16 @@ extern nodemask_t _unused_nodemask_arg_;
  *
  * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
  */
-#define nodemask_pr_args(maskp)                                \
-       ((maskp) != NULL) ? MAX_NUMNODES : 0,           \
-       ((maskp) != NULL) ? (maskp)->bits : NULL
+#define nodemask_pr_args(maskp)        __nodemask_pr_numnodes(maskp), \
+                               __nodemask_pr_bits(maskp)
+static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
+{
+       return m ? MAX_NUMNODES : 0;
+}
+static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
+{
+       return m ? m->bits : NULL;
+}
 
 /*
  * The inline keyword gives the compiler room to decide to inline, or
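
nodemask_pr_args() keeps its '%*pb[l]' calling convention, but the NULL handling now lives in two typed inline helpers instead of conditional expressions inside the macro. A usage sketch; the nodemask variable is an assumption.

	/* Hedged sketch: print a nodemask with the bitmap list format. */
	nodemask_t nodes = NODE_MASK_NONE;

	node_set(0, nodes);
	pr_info("allowed nodes: %*pbl\n", nodemask_pr_args(&nodes));
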
index 609e232c00da824739f461dceb1abc4481404f7c..c308964777eb98cd2edc6c36e1a75d9e3d3861f1 100644 (file)
@@ -70,6 +70,7 @@ struct pci_dev;
  * @NTB_TOPO_SEC:      On secondary side of remote ntb.
  * @NTB_TOPO_B2B_USD:  On primary side of local ntb upstream of remote ntb.
  * @NTB_TOPO_B2B_DSD:  On primary side of local ntb downstream of remote ntb.
+ * @NTB_TOPO_SWITCH:   Connected via a switch which supports ntb.
  */
 enum ntb_topo {
        NTB_TOPO_NONE = -1,
@@ -77,6 +78,7 @@ enum ntb_topo {
        NTB_TOPO_SEC,
        NTB_TOPO_B2B_USD,
        NTB_TOPO_B2B_DSD,
+       NTB_TOPO_SWITCH,
 };
 
 static inline int ntb_topo_is_b2b(enum ntb_topo topo)
@@ -97,6 +99,7 @@ static inline char *ntb_topo_string(enum ntb_topo topo)
        case NTB_TOPO_SEC:      return "NTB_TOPO_SEC";
        case NTB_TOPO_B2B_USD:  return "NTB_TOPO_B2B_USD";
        case NTB_TOPO_B2B_DSD:  return "NTB_TOPO_B2B_DSD";
+       case NTB_TOPO_SWITCH:   return "NTB_TOPO_SWITCH";
        }
        return "NTB_TOPO_INVALID";
 }
@@ -730,7 +733,8 @@ static inline int ntb_link_disable(struct ntb_dev *ntb)
  * Hardware and topology may support a different number of memory windows.
  * Moreover different peer devices can support different number of memory
  * windows. Simply speaking this method returns the number of possible inbound
- * memory windows to share with specified peer device.
+ * memory windows to share with specified peer device. Note: this may return
+ * zero if the link is not up yet.
  *
  * Return: the number of memory windows.
  */
@@ -751,7 +755,7 @@ static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
  * Get the alignments of an inbound memory window with specified index.
  * NULL may be given for any output parameter if the value is not needed.
  * The alignment and size parameters may be used for allocation of proper
- * shared memory.
+ * shared memory. Note: this must only be called when the link is up.
  *
  * Return: Zero on success, otherwise a negative error number.
  */
@@ -760,6 +764,9 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
                                   resource_size_t *size_align,
                                   resource_size_t *size_max)
 {
+       if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx)))
+               return -ENOTCONN;
+
        return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
                                      size_max);
 }
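
ntb_mw_get_align() now enforces the rule spelled out in the kerneldoc above: it is only valid once the link to that peer is up, and returns -ENOTCONN otherwise. A hedged sketch of a client honouring that, typically from its link-event callback; ntb and pidx are assumptions.

	resource_size_t addr_align, size_align, size_max;
	int rc;

	if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT(pidx)))
		return -EAGAIN;		/* wait for the next link event */

	rc = ntb_mw_get_align(ntb, pidx, 0, &addr_align,
			      &size_align, &size_max);
	if (rc)
		return rc;
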
index e942558b3585f99e6d705bbe1c81c904c3f52658..9132c5cb41f10eadb92674e67184fb53baf28bd7 100644 (file)
@@ -96,6 +96,17 @@ void set_pfnblock_flags_mask(struct page *page,
 #define set_pageblock_skip(page) \
                        set_pageblock_flags_group(page, 1, PB_migrate_skip,  \
                                                        PB_migrate_skip)
+#else
+static inline bool get_pageblock_skip(struct page *page)
+{
+       return false;
+}
+static inline void clear_pageblock_skip(struct page *page)
+{
+}
+static inline void set_pageblock_skip(struct page *page)
+{
+}
 #endif /* CONFIG_COMPACTION */
 
 #endif /* PAGEBLOCK_FLAGS_H */
index 96c94980d1ff383f9f45bac4ccd2f6ed6361525c..0403894147a3ca970ff7248549919693b53d9e8e 100644 (file)
@@ -1485,12 +1485,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
 static inline void pcie_ecrc_get_policy(char *str) { }
 #endif
 
-#ifdef CONFIG_HT_IRQ
-/* The functions a driver should call */
-int  ht_create_irq(struct pci_dev *dev, int idx);
-void ht_destroy_irq(unsigned int irq);
-#endif /* CONFIG_HT_IRQ */
-
 #ifdef CONFIG_PCI_ATS
 /* Address Translation Service */
 void pci_ats_init(struct pci_dev *dev);
index 874b71a700586340439e34ea73a0dc5d14695bba..2c9c87d8a0c18e5f5c1cf2a8e148504e4f3ad3a9 100644 (file)
@@ -1169,7 +1169,7 @@ extern void perf_event_init(void);
 extern void perf_tp_event(u16 event_type, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
-                         struct task_struct *task, struct perf_event *event);
+                         struct task_struct *task);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
index dfd684ce078754682c31e631146dc24368336a8b..7633d55d9a24730c0dacdc43ec50bb72b1bbbbe0 100644 (file)
@@ -51,10 +51,8 @@ enum pid_type
  */
 
 struct upid {
-       /* Try to keep pid_chain in the same cacheline as nr for find_vpid */
        int nr;
        struct pid_namespace *ns;
-       struct hlist_node pid_chain;
 };
 
 struct pid
index c78af6061644f35f4dec749665a6422da2aba8ad..49538b172483c4f519100e1617f42d0f897bfbaa 100644 (file)
 #include <linux/nsproxy.h>
 #include <linux/kref.h>
 #include <linux/ns_common.h>
+#include <linux/idr.h>
 
-struct pidmap {
-       atomic_t nr_free;
-       void *page;
-};
-
-#define BITS_PER_PAGE          (PAGE_SIZE * 8)
-#define BITS_PER_PAGE_MASK     (BITS_PER_PAGE-1)
-#define PIDMAP_ENTRIES         ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE)
 
 struct fs_pin;
 
@@ -30,10 +23,9 @@ enum { /* definitions for pid_namespace's hide_pid field */
 
 struct pid_namespace {
        struct kref kref;
-       struct pidmap pidmap[PIDMAP_ENTRIES];
+       struct idr idr;
        struct rcu_head rcu;
-       int last_pid;
-       unsigned int nr_hashed;
+       unsigned int pid_allocated;
        struct task_struct *child_reaper;
        struct kmem_cache *pid_cachep;
        unsigned int level;
@@ -57,7 +49,7 @@ struct pid_namespace {
 
 extern struct pid_namespace init_pid_ns;
 
-#define PIDNS_HASH_ADDING (1U << 31)
+#define PIDNS_ADDING (1U << 31)
 
 #ifdef CONFIG_PID_NS
 static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
@@ -106,6 +98,6 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
 
 extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
 void pidhash_init(void);
-void pidmap_init(void);
+void pid_idr_init(void);
 
 #endif /* _LINUX_PID_NS_H */
index 6a80cfc63e0cbf1484bdd874e9d52a2d28519c93..2dc5e9870fcd7cf4d75128667d9af4626a578b60 100644 (file)
@@ -191,5 +191,6 @@ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
 struct pipe_inode_info *get_pipe_info(struct file *file);
 
 int create_pipe_files(struct file **, int);
+unsigned int round_pipe_size(unsigned int size);
 
 #endif
index 25e267f1970c9bde11d1cb580bcc6625e61d1d1e..619df2431e750794c1081580221e88659241554b 100644 (file)
@@ -64,21 +64,4 @@ struct gpmc_nand_regs {
        void __iomem    *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
        void __iomem    *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
 };
-
-struct omap_nand_platform_data {
-       int                     cs;
-       struct mtd_partition    *parts;
-       int                     nr_parts;
-       bool                    flash_bbt;
-       enum nand_io            xfer_type;
-       int                     devsize;
-       enum omap_ecc           ecc_opt;
-
-       struct device_node      *elm_of_node;
-
-       /* deprecated */
-       struct gpmc_nand_regs   reg;
-       struct device_node      *of_node;
-       bool                    dev_ready;
-};
 #endif
index 905bba92f01598ae535b2d2cd737d1a78e739476..e9b603ee99532266a735555ffb784a939f18f5cf 100644 (file)
@@ -132,10 +132,8 @@ struct va_format {
  */
 #define no_printk(fmt, ...)                            \
 ({                                                     \
-       do {                                            \
-               if (0)                                  \
-                       printk(fmt, ##__VA_ARGS__);     \
-       } while (0);                                    \
+       if (0)                                          \
+               printk(fmt, ##__VA_ARGS__);             \
        0;                                              \
 })
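
The do { } while (0) wrapper was redundant inside no_printk()'s statement expression: the plain if (0) still lets the compiler format-check the arguments, and the trailing 0 keeps the whole expression usable as a value. A hedged sketch of a typical consumer; my_dbg and depth are illustrative.

#define my_dbg(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)

	/* Compiles to nothing, but "depth" is still type-checked. */
	my_dbg("queue depth %d\n", depth);
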
 
index e8357f57069592b28f730bd2f6d65a8fb3e621d5..1fd27d68926bc8e6e6565bf72523ef295c614dbe 100644 (file)
@@ -23,6 +23,19 @@ struct qcom_scm_hdcp_req {
        u32 val;
 };
 
+struct qcom_scm_vmperm {
+       int vmid;
+       int perm;
+};
+
+#define QCOM_SCM_VMID_HLOS       0x3
+#define QCOM_SCM_VMID_MSS_MSA    0xF
+#define QCOM_SCM_PERM_READ       0x4
+#define QCOM_SCM_PERM_WRITE      0x2
+#define QCOM_SCM_PERM_EXEC       0x1
+#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
+#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
+
 #if IS_ENABLED(CONFIG_QCOM_SCM)
 extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
 extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
@@ -37,6 +50,9 @@ extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
                                  phys_addr_t size);
 extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
 extern int qcom_scm_pas_shutdown(u32 peripheral);
+extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+                              unsigned int *src, struct qcom_scm_vmperm *newvm,
+                              int dest_cnt);
 extern void qcom_scm_cpu_power_down(u32 flags);
 extern u32 qcom_scm_get_version(void);
 extern int qcom_scm_set_remote_state(u32 state, u32 id);
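
The qcom_scm_vmperm structure, the VMID/permission constants and qcom_scm_assign_mem() let a driver reassign a physically contiguous region to another VM with explicit access rights. A hedged sketch handing a region from HLOS to the modem MSA; mem_phys and mem_size are assumptions and the source-VM bitmask handling is simplified for illustration.

	struct qcom_scm_vmperm newvm[] = {
		{ .vmid = QCOM_SCM_VMID_MSS_MSA, .perm = QCOM_SCM_PERM_RW },
	};
	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);
	int ret;

	ret = qcom_scm_assign_mem(mem_phys, mem_size, &srcvm,
				  newvm, ARRAY_SIZE(newvm));
	if (ret)
		pr_err("assign_mem failed: %d\n", ret);
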
index 0ca448c1cb42f32a09c8ad607ee15d1169fcb1ae..23a9c89c7ad9627637a31675af7be4c5bdf070b8 100644 (file)
@@ -22,7 +22,6 @@
 #define _LINUX_RADIX_TREE_H
 
 #include <linux/bitops.h>
-#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/preempt.h>
index d03da0eb95ca773bbe912f6ad139a303299b6412..e63799a6e89515d0893606530b07e101f39c52fb 100644 (file)
@@ -6,6 +6,8 @@
 #include <linux/notifier.h>
 #include <uapi/linux/reboot.h>
 
+struct device;
+
 #define SYS_DOWN       0x0001  /* Notify of system down */
 #define SYS_RESTART    SYS_DOWN
 #define SYS_HALT       0x0002  /* Notify of system halt */
@@ -39,6 +41,8 @@ extern int reboot_force;
 extern int register_reboot_notifier(struct notifier_block *);
 extern int unregister_reboot_notifier(struct notifier_block *);
 
+extern int devm_register_reboot_notifier(struct device *, struct notifier_block *);
+
 extern int register_restart_handler(struct notifier_block *);
 extern int unregister_restart_handler(struct notifier_block *);
 extern void do_kernel_restart(char *cmd);
index a5dc7c98b0a2e5d54814a2cd6d3ecc2ffd6f4ea1..21991d668d35231e625b95d99e2bd6826ba6a47e 100644 (file)
@@ -473,10 +473,10 @@ struct sched_dl_entity {
         * conditions between the inactive timer handler and the wakeup
         * code.
         */
-       int                             dl_throttled      : 1;
-       int                             dl_boosted        : 1;
-       int                             dl_yielded        : 1;
-       int                             dl_non_contending : 1;
+       unsigned int                    dl_throttled      : 1;
+       unsigned int                    dl_boosted        : 1;
+       unsigned int                    dl_yielded        : 1;
+       unsigned int                    dl_non_contending : 1;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
index ed06e1c28fc72739774ee0dc83ec001825da0138..bc486ef23f20f91ce3ed183e935399d1e4c55e18 100644 (file)
@@ -568,6 +568,8 @@ enum {
        SKB_GSO_SCTP = 1 << 14,
 
        SKB_GSO_ESP = 1 << 15,
+
+       SKB_GSO_UDP = 1 << 16,
 };
 
 #if BITS_PER_LONG > 32
index 4eff6e68600d979ed5bdbc31bebf875c95ba2332..9f5c6e53f3a5347cac592c6a16ce607b08149144 100644 (file)
@@ -27,6 +27,10 @@ struct qcom_smd_rpm;
 #define QCOM_SMD_RPM_SMPB      0x62706d73
 #define QCOM_SMD_RPM_SPDM      0x63707362
 #define QCOM_SMD_RPM_VSA       0x00617376
+#define QCOM_SMD_RPM_MMAXI_CLK 0x69786d6d
+#define QCOM_SMD_RPM_IPA_CLK   0x617069
+#define QCOM_SMD_RPM_CE_CLK    0x6563
+#define QCOM_SMD_RPM_AGGR_CLK  0x72676761
 
 int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
                       int state,
index 270bad0e1bed137e9ce44727d9d79fde830e0006..40d2822f0e2f1d1a6aa1a84e2ec820a8f9df392e 100644 (file)
@@ -213,7 +213,7 @@ extern void __init cache_initialize(void);
 extern int cache_register_net(struct cache_detail *cd, struct net *net);
 extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
 
-extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net);
+extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net);
 extern void cache_destroy_net(struct cache_detail *cd, struct net *net);
 
 extern void sunrpc_init_cache_detail(struct cache_detail *cd);
index 3b9f0d1dbb808587b608c0941edc43e55d002bae..786ae2255f0566bc50b44368303a9daba049678c 100644 (file)
@@ -47,6 +47,7 @@ struct svc_pool {
        struct svc_pool_stats   sp_stats;       /* statistics on pool operation */
 #define        SP_TASK_PENDING         (0)             /* still work to do even if no
                                                 * xprt is queued. */
+#define SP_CONGESTED           (1)
        unsigned long           sp_flags;
 } ____cacheline_aligned_in_smp;
 
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
new file mode 100644 (file)
index 0000000..09d73d0
--- /dev/null
@@ -0,0 +1,373 @@
+/*
+ * Microsemi Switchtec PCIe Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _SWITCHTEC_H
+#define _SWITCHTEC_H
+
+#include <linux/pci.h>
+#include <linux/cdev.h>
+
+#define MICROSEMI_VENDOR_ID         0x11f8
+#define MICROSEMI_NTB_CLASSCODE     0x068000
+#define MICROSEMI_MGMT_CLASSCODE    0x058000
+
+#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
+#define SWITCHTEC_MAX_PFF_CSR 48
+
+#define SWITCHTEC_EVENT_OCCURRED BIT(0)
+#define SWITCHTEC_EVENT_CLEAR    BIT(0)
+#define SWITCHTEC_EVENT_EN_LOG   BIT(1)
+#define SWITCHTEC_EVENT_EN_CLI   BIT(2)
+#define SWITCHTEC_EVENT_EN_IRQ   BIT(3)
+#define SWITCHTEC_EVENT_FATAL    BIT(4)
+
+enum {
+       SWITCHTEC_GAS_MRPC_OFFSET       = 0x0000,
+       SWITCHTEC_GAS_TOP_CFG_OFFSET    = 0x1000,
+       SWITCHTEC_GAS_SW_EVENT_OFFSET   = 0x1800,
+       SWITCHTEC_GAS_SYS_INFO_OFFSET   = 0x2000,
+       SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
+       SWITCHTEC_GAS_PART_CFG_OFFSET   = 0x4000,
+       SWITCHTEC_GAS_NTB_OFFSET        = 0x10000,
+       SWITCHTEC_GAS_PFF_CSR_OFFSET    = 0x134000,
+};
+
+struct mrpc_regs {
+       u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+       u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+       u32 cmd;
+       u32 status;
+       u32 ret_value;
+} __packed;
+
+enum mrpc_status {
+       SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
+       SWITCHTEC_MRPC_STATUS_DONE = 2,
+       SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
+       SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
+};
+
+struct sw_event_regs {
+       u64 event_report_ctrl;
+       u64 reserved1;
+       u64 part_event_bitmap;
+       u64 reserved2;
+       u32 global_summary;
+       u32 reserved3[3];
+       u32 stack_error_event_hdr;
+       u32 stack_error_event_data;
+       u32 reserved4[4];
+       u32 ppu_error_event_hdr;
+       u32 ppu_error_event_data;
+       u32 reserved5[4];
+       u32 isp_error_event_hdr;
+       u32 isp_error_event_data;
+       u32 reserved6[4];
+       u32 sys_reset_event_hdr;
+       u32 reserved7[5];
+       u32 fw_exception_hdr;
+       u32 reserved8[5];
+       u32 fw_nmi_hdr;
+       u32 reserved9[5];
+       u32 fw_non_fatal_hdr;
+       u32 reserved10[5];
+       u32 fw_fatal_hdr;
+       u32 reserved11[5];
+       u32 twi_mrpc_comp_hdr;
+       u32 twi_mrpc_comp_data;
+       u32 reserved12[4];
+       u32 twi_mrpc_comp_async_hdr;
+       u32 twi_mrpc_comp_async_data;
+       u32 reserved13[4];
+       u32 cli_mrpc_comp_hdr;
+       u32 cli_mrpc_comp_data;
+       u32 reserved14[4];
+       u32 cli_mrpc_comp_async_hdr;
+       u32 cli_mrpc_comp_async_data;
+       u32 reserved15[4];
+       u32 gpio_interrupt_hdr;
+       u32 gpio_interrupt_data;
+       u32 reserved16[4];
+} __packed;
+
+enum {
+       SWITCHTEC_CFG0_RUNNING = 0x04,
+       SWITCHTEC_CFG1_RUNNING = 0x05,
+       SWITCHTEC_IMG0_RUNNING = 0x03,
+       SWITCHTEC_IMG1_RUNNING = 0x07,
+};
+
+struct sys_info_regs {
+       u32 device_id;
+       u32 device_version;
+       u32 firmware_version;
+       u32 reserved1;
+       u32 vendor_table_revision;
+       u32 table_format_version;
+       u32 partition_id;
+       u32 cfg_file_fmt_version;
+       u16 cfg_running;
+       u16 img_running;
+       u32 reserved2[57];
+       char vendor_id[8];
+       char product_id[16];
+       char product_revision[4];
+       char component_vendor[8];
+       u16 component_id;
+       u8 component_revision;
+} __packed;
+
+struct flash_info_regs {
+       u32 flash_part_map_upd_idx;
+
+       struct active_partition_info {
+               u32 address;
+               u32 build_version;
+               u32 build_string;
+       } active_img;
+
+       struct active_partition_info active_cfg;
+       struct active_partition_info inactive_img;
+       struct active_partition_info inactive_cfg;
+
+       u32 flash_length;
+
+       struct partition_info {
+               u32 address;
+               u32 length;
+       } cfg0;
+
+       struct partition_info cfg1;
+       struct partition_info img0;
+       struct partition_info img1;
+       struct partition_info nvlog;
+       struct partition_info vendor[8];
+};
+
+enum {
+       SWITCHTEC_NTB_REG_INFO_OFFSET   = 0x0000,
+       SWITCHTEC_NTB_REG_CTRL_OFFSET   = 0x4000,
+       SWITCHTEC_NTB_REG_DBMSG_OFFSET  = 0x64000,
+};
+
+struct ntb_info_regs {
+       u8  partition_count;
+       u8  partition_id;
+       u16 reserved1;
+       u64 ep_map;
+       u16 requester_id;
+} __packed;
+
+struct part_cfg_regs {
+       u32 status;
+       u32 state;
+       u32 port_cnt;
+       u32 usp_port_mode;
+       u32 usp_pff_inst_id;
+       u32 vep_pff_inst_id;
+       u32 dsp_pff_inst_id[47];
+       u32 reserved1[11];
+       u16 vep_vector_number;
+       u16 usp_vector_number;
+       u32 port_event_bitmap;
+       u32 reserved2[3];
+       u32 part_event_summary;
+       u32 reserved3[3];
+       u32 part_reset_hdr;
+       u32 part_reset_data[5];
+       u32 mrpc_comp_hdr;
+       u32 mrpc_comp_data[5];
+       u32 mrpc_comp_async_hdr;
+       u32 mrpc_comp_async_data[5];
+       u32 dyn_binding_hdr;
+       u32 dyn_binding_data[5];
+       u32 reserved4[159];
+} __packed;
+
+enum {
+       NTB_CTRL_PART_OP_LOCK = 0x1,
+       NTB_CTRL_PART_OP_CFG = 0x2,
+       NTB_CTRL_PART_OP_RESET = 0x3,
+
+       NTB_CTRL_PART_STATUS_NORMAL = 0x1,
+       NTB_CTRL_PART_STATUS_LOCKED = 0x2,
+       NTB_CTRL_PART_STATUS_LOCKING = 0x3,
+       NTB_CTRL_PART_STATUS_CONFIGURING = 0x4,
+       NTB_CTRL_PART_STATUS_RESETTING = 0x5,
+
+       NTB_CTRL_BAR_VALID = 1 << 0,
+       NTB_CTRL_BAR_DIR_WIN_EN = 1 << 4,
+       NTB_CTRL_BAR_LUT_WIN_EN = 1 << 5,
+
+       NTB_CTRL_REQ_ID_EN = 1 << 0,
+
+       NTB_CTRL_LUT_EN = 1 << 0,
+
+       NTB_PART_CTRL_ID_PROT_DIS = 1 << 0,
+};
+
+struct ntb_ctrl_regs {
+       u32 partition_status;
+       u32 partition_op;
+       u32 partition_ctrl;
+       u32 bar_setup;
+       u32 bar_error;
+       u16 lut_table_entries;
+       u16 lut_table_offset;
+       u32 lut_error;
+       u16 req_id_table_size;
+       u16 req_id_table_offset;
+       u32 req_id_error;
+       u32 reserved1[7];
+       struct {
+               u32 ctl;
+               u32 win_size;
+               u64 xlate_addr;
+       } bar_entry[6];
+       u32 reserved2[216];
+       u32 req_id_table[256];
+       u32 reserved3[512];
+       u64 lut_entry[512];
+} __packed;
+
+#define NTB_DBMSG_IMSG_STATUS BIT_ULL(32)
+#define NTB_DBMSG_IMSG_MASK   BIT_ULL(40)
+
+struct ntb_dbmsg_regs {
+       u32 reserved1[1024];
+       u64 odb;
+       u64 odb_mask;
+       u64 idb;
+       u64 idb_mask;
+       u8  idb_vec_map[64];
+       u32 msg_map;
+       u32 reserved2;
+       struct {
+               u32 msg;
+               u32 status;
+       } omsg[4];
+
+       struct {
+               u32 msg;
+               u8  status;
+               u8  mask;
+               u8  src;
+               u8  reserved;
+       } imsg[4];
+
+       u8 reserved3[3928];
+       u8 msix_table[1024];
+       u8 reserved4[3072];
+       u8 pba[24];
+       u8 reserved5[4072];
+} __packed;
+
+enum {
+       SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
+       SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
+       SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
+       SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
+};
+
+struct pff_csr_regs {
+       u16 vendor_id;
+       u16 device_id;
+       u32 pci_cfg_header[15];
+       u32 pci_cap_region[48];
+       u32 pcie_cap_region[448];
+       u32 indirect_gas_window[128];
+       u32 indirect_gas_window_off;
+       u32 reserved[127];
+       u32 pff_event_summary;
+       u32 reserved2[3];
+       u32 aer_in_p2p_hdr;
+       u32 aer_in_p2p_data[5];
+       u32 aer_in_vep_hdr;
+       u32 aer_in_vep_data[5];
+       u32 dpc_hdr;
+       u32 dpc_data[5];
+       u32 cts_hdr;
+       u32 cts_data[5];
+       u32 reserved3[6];
+       u32 hotplug_hdr;
+       u32 hotplug_data[5];
+       u32 ier_hdr;
+       u32 ier_data[5];
+       u32 threshold_hdr;
+       u32 threshold_data[5];
+       u32 power_mgmt_hdr;
+       u32 power_mgmt_data[5];
+       u32 tlp_throttling_hdr;
+       u32 tlp_throttling_data[5];
+       u32 force_speed_hdr;
+       u32 force_speed_data[5];
+       u32 credit_timeout_hdr;
+       u32 credit_timeout_data[5];
+       u32 link_state_hdr;
+       u32 link_state_data[5];
+       u32 reserved4[174];
+} __packed;
+
+struct switchtec_ntb;
+
+struct switchtec_dev {
+       struct pci_dev *pdev;
+       struct device dev;
+       struct cdev cdev;
+
+       int partition;
+       int partition_count;
+       int pff_csr_count;
+       char pff_local[SWITCHTEC_MAX_PFF_CSR];
+
+       void __iomem *mmio;
+       struct mrpc_regs __iomem *mmio_mrpc;
+       struct sw_event_regs __iomem *mmio_sw_event;
+       struct sys_info_regs __iomem *mmio_sys_info;
+       struct flash_info_regs __iomem *mmio_flash_info;
+       struct ntb_info_regs __iomem *mmio_ntb;
+       struct part_cfg_regs __iomem *mmio_part_cfg;
+       struct part_cfg_regs __iomem *mmio_part_cfg_all;
+       struct pff_csr_regs __iomem *mmio_pff_csr;
+
+       /*
+        * The mrpc mutex must be held when accessing the other
+        * mrpc_ fields, alive flag and stuser->state field
+        */
+       struct mutex mrpc_mutex;
+       struct list_head mrpc_queue;
+       int mrpc_busy;
+       struct work_struct mrpc_work;
+       struct delayed_work mrpc_timeout;
+       bool alive;
+
+       wait_queue_head_t event_wq;
+       atomic_t event_cnt;
+
+       struct work_struct link_event_work;
+       void (*link_notifier)(struct switchtec_dev *stdev);
+       u8 link_event_count[SWITCHTEC_MAX_PFF_CSR];
+
+       struct switchtec_ntb *sndev;
+};
+
+static inline struct switchtec_dev *to_stdev(struct device *dev)
+{
+       return container_of(dev, struct switchtec_dev, dev);
+}
+
+extern struct class *switchtec_class;
+
+#endif
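
The comment in struct switchtec_dev above spells out a locking rule: mrpc_mutex guards the mrpc_* fields and the alive flag. Below is a minimal sketch of a helper that respects that rule; mrpc_queue_cmd() is an invented name and not part of this header, only the locking pattern is the point.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/switchtec.h>

/* Illustrative only: claim the MRPC interface under mrpc_mutex. */
static int mrpc_queue_cmd(struct switchtec_dev *stdev)
{
	int rc = 0;

	mutex_lock(&stdev->mrpc_mutex);
	if (!stdev->alive) {
		rc = -ENODEV;			/* device has gone away */
	} else if (!stdev->mrpc_busy) {
		stdev->mrpc_busy = 1;		/* nobody else is mid-command */
		schedule_work(&stdev->mrpc_work);
	}
	mutex_unlock(&stdev->mrpc_mutex);

	return rc;
}

The to_stdev() helper above is the usual container_of() accessor for getting from the embedded struct device back to the switchtec_dev, e.g. in sysfs attribute callbacks.
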
index b769ecfcc3bd41aad6fd339ba824c6bb622ac24d..992bc9948232bd27d4987a99c82e8e1896f289bb 100644 (file)
@@ -51,6 +51,9 @@ extern int proc_dointvec_minmax(struct ctl_table *, int,
 extern int proc_douintvec_minmax(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos);
+extern int proc_dopipe_max_size(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp,
+                               loff_t *ppos);
 extern int proc_dointvec_jiffies(struct ctl_table *, int,
                                 void __user *, size_t *, loff_t *);
 extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
index 7e9011101cb08f674a5c2ba698be5f67367a464c..d315c3d6725c499bde572fa61ab5068007c0ba5d 100644 (file)
@@ -136,13 +136,6 @@ struct timekeeper {
 extern void update_vsyscall(struct timekeeper *tk);
 extern void update_vsyscall_tz(void);
 
-#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
-
-extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
-                               struct clocksource *c, u32 mult,
-                               u64 cycle_last);
-extern void update_vsyscall_tz(void);
-
 #else
 
 static inline void update_vsyscall(struct timekeeper *tk)
index c198ab40c04fb37174e7dbd683abe82715dcfb16..b17bcce58bc493eb70580e483bf20fea54ab2fe7 100644 (file)
@@ -142,12 +142,6 @@ extern bool timekeeping_rtc_skipresume(void);
 
 extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
 
-/*
- * PPS accessor
- */
-extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
-                                       struct timespec64 *ts_real);
-
 /*
  * struct system_time_snapshot - simultaneous raw/real time capture with
  *     counter value
index bf781acfc6d820f555eefed29dbcdc7c8c4f7033..04af640ea95bd011cdec0101798b0aa7810233cb 100644 (file)
@@ -17,8 +17,7 @@ struct timer_list {
         */
        struct hlist_node       entry;
        unsigned long           expires;
-       void                    (*function)(unsigned long);
-       unsigned long           data;
+       void                    (*function)(struct timer_list *);
        u32                     flags;
 
 #ifdef CONFIG_LOCKDEP
@@ -64,13 +63,9 @@ struct timer_list {
 
 #define TIMER_TRACE_FLAGMASK   (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
 
-#define TIMER_DATA_TYPE                unsigned long
-#define TIMER_FUNC_TYPE                void (*)(TIMER_DATA_TYPE)
-
-#define __TIMER_INITIALIZER(_function, _data, _flags) {                \
+#define __TIMER_INITIALIZER(_function, _flags) {               \
                .entry = { .next = TIMER_ENTRY_STATIC },        \
                .function = (_function),                        \
-               .data = (_data),                                \
                .flags = (_flags),                              \
                __TIMER_LOCKDEP_MAP_INITIALIZER(                \
                        __FILE__ ":" __stringify(__LINE__))     \
@@ -78,108 +73,71 @@ struct timer_list {
 
 #define DEFINE_TIMER(_name, _function)                         \
        struct timer_list _name =                               \
-               __TIMER_INITIALIZER((TIMER_FUNC_TYPE)_function, 0, 0)
+               __TIMER_INITIALIZER(_function, 0)
 
-void init_timer_key(struct timer_list *timer, unsigned int flags,
+/*
+ * LOCKDEP and DEBUG timer interfaces.
+ */
+void init_timer_key(struct timer_list *timer,
+                   void (*func)(struct timer_list *), unsigned int flags,
                    const char *name, struct lock_class_key *key);
 
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 extern void init_timer_on_stack_key(struct timer_list *timer,
+                                   void (*func)(struct timer_list *),
                                    unsigned int flags, const char *name,
                                    struct lock_class_key *key);
-extern void destroy_timer_on_stack(struct timer_list *timer);
 #else
-static inline void destroy_timer_on_stack(struct timer_list *timer) { }
 static inline void init_timer_on_stack_key(struct timer_list *timer,
-                                          unsigned int flags, const char *name,
+                                          void (*func)(struct timer_list *),
+                                          unsigned int flags,
+                                          const char *name,
                                           struct lock_class_key *key)
 {
-       init_timer_key(timer, flags, name, key);
+       init_timer_key(timer, func, flags, name, key);
 }
 #endif
 
 #ifdef CONFIG_LOCKDEP
-#define __init_timer(_timer, _flags)                                   \
+#define __init_timer(_timer, _fn, _flags)                              \
        do {                                                            \
                static struct lock_class_key __key;                     \
-               init_timer_key((_timer), (_flags), #_timer, &__key);    \
+               init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\
        } while (0)
 
-#define __init_timer_on_stack(_timer, _flags)                          \
+#define __init_timer_on_stack(_timer, _fn, _flags)                     \
        do {                                                            \
                static struct lock_class_key __key;                     \
-               init_timer_on_stack_key((_timer), (_flags), #_timer, &__key); \
+               init_timer_on_stack_key((_timer), (_fn), (_flags),      \
+                                       #_timer, &__key);                \
        } while (0)
 #else
-#define __init_timer(_timer, _flags)                                   \
-       init_timer_key((_timer), (_flags), NULL, NULL)
-#define __init_timer_on_stack(_timer, _flags)                          \
-       init_timer_on_stack_key((_timer), (_flags), NULL, NULL)
+#define __init_timer(_timer, _fn, _flags)                              \
+       init_timer_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __init_timer_on_stack(_timer, _fn, _flags)                     \
+       init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL)
 #endif
 
-#define init_timer(timer)                                              \
-       __init_timer((timer), 0)
-
-#define __setup_timer(_timer, _fn, _data, _flags)                      \
-       do {                                                            \
-               __init_timer((_timer), (_flags));                       \
-               (_timer)->function = (_fn);                             \
-               (_timer)->data = (_data);                               \
-       } while (0)
-
-#define __setup_timer_on_stack(_timer, _fn, _data, _flags)             \
-       do {                                                            \
-               __init_timer_on_stack((_timer), (_flags));              \
-               (_timer)->function = (_fn);                             \
-               (_timer)->data = (_data);                               \
-       } while (0)
-
-#define setup_timer(timer, fn, data)                                   \
-       __setup_timer((timer), (fn), (data), 0)
-#define setup_pinned_timer(timer, fn, data)                            \
-       __setup_timer((timer), (fn), (data), TIMER_PINNED)
-#define setup_deferrable_timer(timer, fn, data)                                \
-       __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
-#define setup_pinned_deferrable_timer(timer, fn, data)                 \
-       __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
-#define setup_timer_on_stack(timer, fn, data)                          \
-       __setup_timer_on_stack((timer), (fn), (data), 0)
-#define setup_pinned_timer_on_stack(timer, fn, data)                   \
-       __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
-#define setup_deferrable_timer_on_stack(timer, fn, data)               \
-       __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
-#define setup_pinned_deferrable_timer_on_stack(timer, fn, data)                \
-       __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
+/**
+ * timer_setup - prepare a timer for first use
+ * @timer: the timer in question
+ * @callback: the function to call when timer expires
+ * @flags: any TIMER_* flags
+ *
+ * Regular timer initialization should use either DEFINE_TIMER() above,
+ * or timer_setup(). For timers on the stack, timer_setup_on_stack() must
+ * be used and must be balanced with a call to destroy_timer_on_stack().
+ */
+#define timer_setup(timer, callback, flags)                    \
+       __init_timer((timer), (callback), (flags))
 
-#ifndef CONFIG_LOCKDEP
-static inline void timer_setup(struct timer_list *timer,
-                              void (*callback)(struct timer_list *),
-                              unsigned int flags)
-{
-       __setup_timer(timer, (TIMER_FUNC_TYPE)callback,
-                     (TIMER_DATA_TYPE)timer, flags);
-}
+#define timer_setup_on_stack(timer, callback, flags)           \
+       __init_timer_on_stack((timer), (callback), (flags))
 
-static inline void timer_setup_on_stack(struct timer_list *timer,
-                              void (*callback)(struct timer_list *),
-                              unsigned int flags)
-{
-       __setup_timer_on_stack(timer, (TIMER_FUNC_TYPE)callback,
-                              (TIMER_DATA_TYPE)timer, flags);
-}
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void destroy_timer_on_stack(struct timer_list *timer);
 #else
-/*
- * Under LOCKDEP, the timer lock_class_key (set up in __init_timer) needs
- * to be tied to the caller's context, so an inline (above) won't work. We
- * do want to keep the inline for argument type checking, though.
- */
-# define timer_setup(timer, callback, flags)                           \
-               __setup_timer((timer), (TIMER_FUNC_TYPE)(callback),     \
-                             (TIMER_DATA_TYPE)(timer), (flags))
-# define timer_setup_on_stack(timer, callback, flags)                  \
-               __setup_timer_on_stack((timer),                         \
-                                      (TIMER_FUNC_TYPE)(callback),     \
-                                      (TIMER_DATA_TYPE)(timer), (flags))
+static inline void destroy_timer_on_stack(struct timer_list *timer) { }
 #endif
 
 #define from_timer(var, callback_timer, timer_fieldname) \
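
This hunk retires the old (unsigned long data) timer callbacks and the setup_timer()/__setup_timer() family: callbacks now take the struct timer_list itself, recover their containing object with from_timer(), and initialization goes through timer_setup(). A hedged sketch of a converted user, with struct foo and its functions invented for illustration:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list timer;
	int hits;
};

/* New-style callback: gets the timer pointer instead of a cookie. */
static void foo_timeout(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, timer);	/* container_of() under the hood */

	f->hits++;
}

static void foo_start(struct foo *f)
{
	timer_setup(&f->timer, foo_timeout, 0);
	mod_timer(&f->timer, jiffies + HZ);		/* fire in roughly one second */
}

Stack-based timers would use timer_setup_on_stack() and pair it with destroy_timer_on_stack(), as the kerneldoc above notes.
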
index 84014ecfa67ff284fc6b657e565e57ea12d9c89e..af44e7c2d577e8f0bd7c50f12ca70deeb4714dcc 100644 (file)
@@ -174,6 +174,11 @@ enum trace_reg {
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,
+       /*
+        * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
+        * custom action was taken and the default action is not to be
+        * performed.
+        */
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
 #endif
@@ -542,9 +547,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                       u64 count, struct pt_regs *regs, void *head,
-                      struct task_struct *task, struct perf_event *event)
+                      struct task_struct *task)
 {
-       perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
+       perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
 }
 
 #endif
index 210034c896e31e725c6de6bfb33b0406c3b1927a..f144216febc642fd70512df9dddefe1a7f119478 100644 (file)
@@ -9,7 +9,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        const struct virtio_net_hdr *hdr,
                                        bool little_endian)
 {
-       unsigned short gso_type = 0;
+       unsigned int gso_type = 0;
 
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -19,6 +19,9 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
                        break;
+               case VIRTIO_NET_HDR_GSO_UDP:
+                       gso_type = SKB_GSO_UDP;
+                       break;
                default:
                        return -EINVAL;
                }
index cd0d7734dc49838eef044dc1a3ad2c47286a72ac..4757cb5077e538feb793cb06bb3d7fcb335430b9 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/device.h>
 #include <linux/acpi.h>
+#include <uapi/linux/wmi.h>
 
 struct wmi_device {
        struct device dev;
@@ -26,13 +27,17 @@ struct wmi_device {
        bool setable;
 };
 
+/* evaluate the ACPI method associated with this device */
+extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
+                                         u8 instance, u32 method_id,
+                                         const struct acpi_buffer *in,
+                                         struct acpi_buffer *out);
+
 /* Caller must kfree the result. */
 extern union acpi_object *wmidev_block_query(struct wmi_device *wdev,
                                             u8 instance);
 
-/* Gets another device on the same bus.  Caller must put_device the result. */
-extern struct wmi_device *wmidev_get_other_guid(struct wmi_device *wdev,
-                                               const char *guid_string);
+extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
 
 struct wmi_device_id {
        const char *guid_string;
@@ -45,6 +50,8 @@ struct wmi_driver {
        int (*probe)(struct wmi_device *wdev);
        int (*remove)(struct wmi_device *wdev);
        void (*notify)(struct wmi_device *device, union acpi_object *data);
+       long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd,
+                               struct wmi_ioctl_buffer *arg);
 };
 
 extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
index 01a050fc6650ab055315d1dd00f5309bbe263450..4a54ef96aff5b0ba8e9bf53cc754d09c0dd4dd55 100644 (file)
@@ -176,8 +176,7 @@ struct execute_work {
 
 #define __DELAYED_WORK_INITIALIZER(n, f, tflags) {                     \
        .work = __WORK_INITIALIZER((n).work, (f)),                      \
-       .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)delayed_work_timer_fn,\
-                                    (TIMER_DATA_TYPE)&(n.timer),       \
+       .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
                                     (tflags) | TIMER_IRQSAFE),         \
        }
 
@@ -242,19 +241,17 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #define __INIT_DELAYED_WORK(_work, _func, _tflags)                     \
        do {                                                            \
                INIT_WORK(&(_work)->work, (_func));                     \
-               __setup_timer(&(_work)->timer,                          \
-                             (TIMER_FUNC_TYPE)delayed_work_timer_fn,   \
-                             (TIMER_DATA_TYPE)&(_work)->timer,         \
-                             (_tflags) | TIMER_IRQSAFE);               \
+               __init_timer(&(_work)->timer,                           \
+                            delayed_work_timer_fn,                     \
+                            (_tflags) | TIMER_IRQSAFE);                \
        } while (0)
 
 #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)             \
        do {                                                            \
                INIT_WORK_ONSTACK(&(_work)->work, (_func));             \
-               __setup_timer_on_stack(&(_work)->timer,                 \
-                                      (TIMER_FUNC_TYPE)delayed_work_timer_fn,\
-                                      (TIMER_DATA_TYPE)&(_work)->timer,\
-                                      (_tflags) | TIMER_IRQSAFE);      \
+               __init_timer_on_stack(&(_work)->timer,                  \
+                                     delayed_work_timer_fn,            \
+                                     (_tflags) | TIMER_IRQSAFE);       \
        } while (0)
 
 #define INIT_DELAYED_WORK(_work, _func)                                        \
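
With the timer field of struct delayed_work no longer carrying a data cookie, the initializers above simply hand delayed_work_timer_fn to __init_timer(). Callers are unaffected; a small sketch, with the work function and delay made up:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_work_fn(struct work_struct *work)
{
	/* runs in process context once the underlying timer fires */
}

static struct delayed_work my_dwork;

static void my_arm(void)
{
	INIT_DELAYED_WORK(&my_dwork, my_work_fn);
	schedule_delayed_work(&my_dwork, msecs_to_jiffies(500));
}
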
index f42d85631d1711fd0085141fc1e61b0c31cd1ddd..fdfd04e348f698b3d108228868866072164d31b7 100644 (file)
@@ -308,7 +308,7 @@ static inline void cgroup_writeback_umount(void)
 void laptop_io_completion(struct backing_dev_info *info);
 void laptop_sync_completion(void);
 void laptop_mode_sync(struct work_struct *work);
-void laptop_mode_timer_fn(unsigned long data);
+void laptop_mode_timer_fn(struct timer_list *t);
 #else
 static inline void laptop_sync_completion(void) { }
 #endif
index 5ac169a735f4bdfab76d76f2036488bcf5e0347a..decf6012a40163c564900c3c39e50d2bf48aa64f 100644 (file)
@@ -154,15 +154,12 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
 /**
  * genlmsg_nlhdr - Obtain netlink header from user specified header
  * @user_hdr: user header as returned from genlmsg_put()
- * @family: generic netlink family
  *
  * Returns pointer to netlink header.
  */
-static inline struct nlmsghdr *
-genlmsg_nlhdr(void *user_hdr, const struct genl_family *family)
+static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr)
 {
        return (struct nlmsghdr *)((char *)user_hdr -
-                                  family->hdrsize -
                                   GENL_HDRLEN -
                                   NLMSG_HDRLEN);
 }
@@ -190,16 +187,14 @@ static inline int genlmsg_parse(const struct nlmsghdr *nlh,
  * genl_dump_check_consistent - check if sequence is consistent and advertise if not
  * @cb: netlink callback structure that stores the sequence number
  * @user_hdr: user header as returned from genlmsg_put()
- * @family: generic netlink family
  *
  * Cf. nl_dump_check_consistent(), this just provides a wrapper to make it
  * simpler to use with generic netlink.
  */
 static inline void genl_dump_check_consistent(struct netlink_callback *cb,
-                                             void *user_hdr,
-                                             const struct genl_family *family)
+                                             void *user_hdr)
 {
-       nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr, family));
+       nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr));
 }
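
As the kerneldoc above says, genlmsg_nlhdr() now finds the netlink header purely from the fixed GENL_HDRLEN and NLMSG_HDRLEN offsets, so the family argument is gone from it and from genl_dump_check_consistent(). A hedged sketch of a dump callback using the two-argument form; demo_family and DEMO_CMD_GET are placeholders, not a real family:

#include <linux/netlink.h>
#include <net/genetlink.h>

enum { DEMO_CMD_GET = 1 };		/* illustrative command number */
static struct genl_family demo_family;	/* illustrative family */

static int demo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	void *hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &demo_family,
				NLM_F_MULTI, DEMO_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);	/* no family argument any more */

	genlmsg_end(skb, hdr);
	return skb->len;
}
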
 
 /**
index ec14f0d5a3a189f5d461f71e530e425c83d64e0b..f73797e2fa60c51a81c8d7a0e231bd2be0137119 100644 (file)
@@ -767,6 +767,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
 __be32 ipv6_select_ident(struct net *net,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr);
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
 
index cc9073e45be90cb78c31c970f154f3686b4f8435..eec143cca1c0f118ac6e0447ebb55c6c7b1c2057 100644 (file)
@@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
  * ieee80211_nullfunc_get - retrieve a nullfunc template
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @qos_ok: QoS NDP is acceptable to the caller; this should be set
+ *     if at all possible
  *
  * Creates a Nullfunc template which can, for example, be uploaded to
  * hardware. The template must be updated after association so that the
  * correct BSSID and address are used.
  *
+ * If @qos_ok is set and the association is to an AP with QoS/WMM, the
+ * returned packet will be a QoS NDP.
+ *
  * Note: Caller (or hardware) is responsible for setting the
  * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
  *
  * Return: The nullfunc template. %NULL on error.
  */
 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif);
+                                      struct ieee80211_vif *vif,
+                                      bool qos_ok);
 
 /**
  * ieee80211_probereq_get - retrieve a Probe Request template
index 4a5b9a306c69b4139c8811138204410bb791e79b..32ee65a30aff1146dcafcc533e73833e190cb887 100644 (file)
@@ -48,31 +48,32 @@ static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
        /* This uses the crypto implementation of crc32c, which is either
         * implemented w/ hardware support or resolves to __crc32c_le().
         */
-       return crc32c(sum, buff, len);
+       return (__force __wsum)crc32c((__force __u32)sum, buff, len);
 }
 
 static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
                                       int offset, int len)
 {
-       return __crc32c_le_combine(csum, csum2, len);
+       return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
+                                                  (__force __u32)csum2, len);
 }
 
 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
                                        unsigned int offset)
 {
        struct sctphdr *sh = sctp_hdr(skb);
-        __le32 ret, old = sh->checksum;
        const struct skb_checksum_ops ops = {
                .update  = sctp_csum_update,
                .combine = sctp_csum_combine,
        };
+       __le32 old = sh->checksum;
+       __wsum new;
 
        sh->checksum = 0;
-       ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
-                                         ~(__u32)0, &ops));
+       new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, &ops);
        sh->checksum = old;
 
-       return ret;
+       return cpu_to_le32((__force __u32)new);
 }
 
 #endif /* __sctp_checksum_h__ */
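
The rewrite above threads __wsum values through explicit (__force ...) casts so that sparse's bitwise type checking is satisfied while the crc32c arithmetic itself is untouched. The same pattern in isolation, as a sketch with an invented helper name:

#include <linux/types.h>
#include <linux/crc32c.h>

/* Fold a buffer into a __wsum via crc32c without tripping sparse. */
static inline __wsum demo_csum_update(__wsum sum, const void *buf, int len)
{
	return (__force __wsum)crc32c((__force __u32)sum, buf, len);
}
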
index d7d8cba014697602832fe20e414b632104c9f239..906a9c0efa714efa28e0abdf98c9781d64e376d9 100644 (file)
@@ -194,6 +194,11 @@ void sctp_remaddr_proc_exit(struct net *net);
  */
 int sctp_offload_init(void);
 
+/*
+ * sctp/stream_sched.c
+ */
+void sctp_sched_ops_init(void);
+
 /*
  * sctp/stream.c
  */
@@ -444,7 +449,8 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
        if (asoc->user_frag)
                frag = min_t(int, frag, asoc->user_frag);
 
-       frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
+       frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
+                                           sizeof(struct sctp_data_chunk)));
 
        return frag;
 }
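
The sctp_frag_point() fix caps the fragment size at SCTP_MAX_CHUNK_LEN minus the DATA chunk header before rounding down to a multiple of four, so the header itself can never push a chunk past the limit. A standalone restatement of the computation, with the limits passed in as parameters because the real values live in the SCTP headers:

#include <linux/kernel.h>

static int demo_frag_point(int pmtu_payload, int max_chunk_len, int data_hdr_len)
{
	int frag = min(pmtu_payload, max_chunk_len - data_hdr_len);

	return frag & ~3;	/* what SCTP_TRUNC4() does */
}
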
index c676550a4c7dd0ea27ac0e14437d0a2b451ef499..5c5da48f65e7c131573970757b54403d348632b2 100644 (file)
@@ -69,4 +69,9 @@ void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
 int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
 struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
 
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+                            struct sctp_sched_ops *sched_ops);
+void sctp_sched_ops_prio_init(void);
+void sctp_sched_ops_rr_init(void);
+
 #endif /* __sctp_stream_sched_h__ */
index 85ea578195d4edd2d637c6e300e1217f6e9b62e4..4e09398009c10a72478b43d3cffc24ba01612b91 100644 (file)
@@ -539,7 +539,7 @@ void tcp_push_one(struct sock *, unsigned int mss_now);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
-bool tcp_schedule_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
                             const struct sk_buff *next_skb);
 
index 1fb6ad3c5006415ccf31f789f0e4e9b3943961ee..7ae177c8e3993c0c39ac6539d149509b575cc1ec 100644 (file)
@@ -15,6 +15,8 @@ struct scsi_cmnd;
 struct scsi_lun;
 struct scsi_sense_hdr;
 
+typedef unsigned int __bitwise blist_flags_t;
+
 struct scsi_mode_data {
        __u32   length;
        __u16   block_descriptor_length;
@@ -141,7 +143,7 @@ struct scsi_device {
        unsigned char current_tag;      /* current tag */
        struct scsi_target      *sdev_target;   /* used only for single_lun */
 
-       unsigned int    sdev_bflags; /* black/white flags as also found in
+       blist_flags_t           sdev_bflags; /* black/white flags as also found in
                                 * scsi_devinfo.[hc]. For now used only to
                                 * pass settings from slave_alloc to scsi
                                 * core. */
index 3cf125b56c3a9d4236c7ad2cf113d3a78c076f3d..ea67c32e870e9c330d3d89b1a9a1366077869809 100644 (file)
@@ -6,55 +6,55 @@
  */
 
 /* Only scan LUN 0 */
-#define BLIST_NOLUN            ((__force __u32 __bitwise)(1 << 0))
+#define BLIST_NOLUN            ((__force blist_flags_t)(1 << 0))
 /* Known to have LUNs, force scanning.
  * DEPRECATED: Use max_luns=N */
-#define BLIST_FORCELUN         ((__force __u32 __bitwise)(1 << 1))
+#define BLIST_FORCELUN         ((__force blist_flags_t)(1 << 1))
 /* Flag for broken handshaking */
-#define BLIST_BORKEN           ((__force __u32 __bitwise)(1 << 2))
+#define BLIST_BORKEN           ((__force blist_flags_t)(1 << 2))
 /* unlock by special command */
-#define BLIST_KEY              ((__force __u32 __bitwise)(1 << 3))
+#define BLIST_KEY              ((__force blist_flags_t)(1 << 3))
 /* Do not use LUNs in parallel */
-#define BLIST_SINGLELUN                ((__force __u32 __bitwise)(1 << 4))
+#define BLIST_SINGLELUN                ((__force blist_flags_t)(1 << 4))
 /* Buggy Tagged Command Queuing */
-#define BLIST_NOTQ             ((__force __u32 __bitwise)(1 << 5))
+#define BLIST_NOTQ             ((__force blist_flags_t)(1 << 5))
 /* Non consecutive LUN numbering */
-#define BLIST_SPARSELUN                ((__force __u32 __bitwise)(1 << 6))
+#define BLIST_SPARSELUN                ((__force blist_flags_t)(1 << 6))
 /* Avoid LUNS >= 5 */
-#define BLIST_MAX5LUN          ((__force __u32 __bitwise)(1 << 7))
+#define BLIST_MAX5LUN          ((__force blist_flags_t)(1 << 7))
 /* Treat as (removable) CD-ROM */
-#define BLIST_ISROM            ((__force __u32 __bitwise)(1 << 8))
+#define BLIST_ISROM            ((__force blist_flags_t)(1 << 8))
 /* LUNs past 7 on a SCSI-2 device */
-#define BLIST_LARGELUN         ((__force __u32 __bitwise)(1 << 9))
+#define BLIST_LARGELUN         ((__force blist_flags_t)(1 << 9))
 /* override additional length field */
-#define BLIST_INQUIRY_36       ((__force __u32 __bitwise)(1 << 10))
+#define BLIST_INQUIRY_36       ((__force blist_flags_t)(1 << 10))
 /* do not do automatic start on add */
-#define BLIST_NOSTARTONADD     ((__force __u32 __bitwise)(1 << 12))
+#define BLIST_NOSTARTONADD     ((__force blist_flags_t)(1 << 12))
 /* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
-#define BLIST_REPORTLUN2       ((__force __u32 __bitwise)(1 << 17))
+#define BLIST_REPORTLUN2       ((__force blist_flags_t)(1 << 17))
 /* don't try REPORT_LUNS scan (SCSI-3 devs) */
-#define BLIST_NOREPORTLUN      ((__force __u32 __bitwise)(1 << 18))
+#define BLIST_NOREPORTLUN      ((__force blist_flags_t)(1 << 18))
 /* don't use PREVENT-ALLOW commands */
-#define BLIST_NOT_LOCKABLE     ((__force __u32 __bitwise)(1 << 19))
+#define BLIST_NOT_LOCKABLE     ((__force blist_flags_t)(1 << 19))
 /* device is actually for RAID config */
-#define BLIST_NO_ULD_ATTACH    ((__force __u32 __bitwise)(1 << 20))
+#define BLIST_NO_ULD_ATTACH    ((__force blist_flags_t)(1 << 20))
 /* select without ATN */
-#define BLIST_SELECT_NO_ATN    ((__force __u32 __bitwise)(1 << 21))
+#define BLIST_SELECT_NO_ATN    ((__force blist_flags_t)(1 << 21))
 /* retry HARDWARE_ERROR */
-#define BLIST_RETRY_HWERROR    ((__force __u32 __bitwise)(1 << 22))
+#define BLIST_RETRY_HWERROR    ((__force blist_flags_t)(1 << 22))
 /* maximum 512 sector cdb length */
-#define BLIST_MAX_512          ((__force __u32 __bitwise)(1 << 23))
+#define BLIST_MAX_512          ((__force blist_flags_t)(1 << 23))
 /* Disable T10 PI (DIF) */
-#define BLIST_NO_DIF           ((__force __u32 __bitwise)(1 << 25))
+#define BLIST_NO_DIF           ((__force blist_flags_t)(1 << 25))
 /* Ignore SBC-3 VPD pages */
-#define BLIST_SKIP_VPD_PAGES   ((__force __u32 __bitwise)(1 << 26))
+#define BLIST_SKIP_VPD_PAGES   ((__force blist_flags_t)(1 << 26))
 /* Attempt to read VPD pages */
-#define BLIST_TRY_VPD_PAGES    ((__force __u32 __bitwise)(1 << 28))
+#define BLIST_TRY_VPD_PAGES    ((__force blist_flags_t)(1 << 28))
 /* don't try to issue RSOC */
-#define BLIST_NO_RSOC          ((__force __u32 __bitwise)(1 << 29))
+#define BLIST_NO_RSOC          ((__force blist_flags_t)(1 << 29))
 /* maximum 1024 sector cdb length */
-#define BLIST_MAX_1024         ((__force __u32 __bitwise)(1 << 30))
+#define BLIST_MAX_1024         ((__force blist_flags_t)(1 << 30))
 /* Use UNMAP limit for WRITE SAME */
-#define BLIST_UNMAP_LIMIT_WS   ((__force __u32 __bitwise)(1 << 31))
+#define BLIST_UNMAP_LIMIT_WS   ((__force blist_flags_t)(1 << 31))
 
 #endif
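
The scsi_devinfo.h conversion above is the standard sparse __bitwise idiom: blist_flags_t is an unsigned int that may only be combined with values deliberately cast into the type, so stray plain integers now produce sparse warnings. Boiled down to a minimal sketch with invented names:

#include <linux/types.h>

typedef unsigned int __bitwise demo_flags_t;

#define DEMO_FLAG_A	((__force demo_flags_t)(1 << 0))
#define DEMO_FLAG_B	((__force demo_flags_t)(1 << 1))

static bool demo_has_flag(demo_flags_t flags, demo_flags_t f)
{
	return (flags & f) != 0;	/* same-type bitwise ops are fine under sparse */
}
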
index 506ea8ffda1995436750b05bde1ebe37605de2a7..482337af06b8d5a1c226b4b18bce2956b37768d1 100644 (file)
@@ -17,6 +17,7 @@
 /* 0x08 ~ 0x0c: Reserved */
 #define AT91_SFR_OHCIICR       0x10    /* OHCI INT Configuration Register */
 #define AT91_SFR_OHCIISR       0x14    /* OHCI INT Status Register */
+#define AT91_SFR_UTMICKTRIM    0x30    /* UTMI Clock Trimming Register */
 #define AT91_SFR_I2SCLKSEL     0x90    /* I2SC Register */
 
 /* Field definitions */
@@ -28,5 +29,6 @@
                                         AT91_OHCIICR_SUSPEND_B | \
                                         AT91_OHCIICR_SUSPEND_C)
 
+#define AT91_UTMICKTRIM_FREQ   GENMASK(1, 0)
 
 #endif /* _LINUX_MFD_SYSCON_ATMEL_SFR_H */
index a1f1152bc687613b87d7d4a1f73a3d139ef9f354..ca13a44ae9d44e971a977e489244b831ac83040c 100644 (file)
@@ -249,7 +249,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
 void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
 #define snd_ctl_sync_vmaster_hook(kctl)        snd_ctl_sync_vmaster(kctl, true)
 int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
-                                int (*func)(struct snd_kcontrol *, void *),
+                                int (*func)(struct snd_kcontrol *vslave,
+                                            struct snd_kcontrol *slave,
+                                            void *arg),
                                 void *arg);
 
 /*
index f5db145e68ecae901ed071a70fe95db4045791b8..2c8d8115469dce4c2fc92bbf2ee91a1e0bccae5f 100644 (file)
@@ -182,6 +182,7 @@ enum tcm_sense_reason_table {
        TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE   = R(0x1a),
        TCM_TOO_MANY_SEGMENT_DESCS              = R(0x1b),
        TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE  = R(0x1c),
+       TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
 #undef R
 };
 
@@ -490,6 +491,7 @@ struct se_cmd {
 #define CMD_T_STOP             (1 << 5)
 #define CMD_T_TAS              (1 << 10)
 #define CMD_T_FABRIC_STOP      (1 << 11)
+#define CMD_T_PRE_EXECUTE      (1 << 12)
        spinlock_t              t_state_lock;
        struct kref             cmd_kref;
        struct completion       t_transport_stop_comp;
index d61bfddcc621f16ac4ca4a85348c483bf3f30d44..2212adda8f77f7d8cb44b0bdb0b22445b1fcb87d 100644 (file)
@@ -9,46 +9,6 @@
 
 struct dma_fence;
 
-TRACE_EVENT(dma_fence_annotate_wait_on,
-
-       /* fence: the fence waiting on f1, f1: the fence to be waited on. */
-       TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
-
-       TP_ARGS(fence, f1),
-
-       TP_STRUCT__entry(
-               __string(driver, fence->ops->get_driver_name(fence))
-               __string(timeline, fence->ops->get_timeline_name(fence))
-               __field(unsigned int, context)
-               __field(unsigned int, seqno)
-
-               __string(waiting_driver, f1->ops->get_driver_name(f1))
-               __string(waiting_timeline, f1->ops->get_timeline_name(f1))
-               __field(unsigned int, waiting_context)
-               __field(unsigned int, waiting_seqno)
-       ),
-
-       TP_fast_assign(
-               __assign_str(driver, fence->ops->get_driver_name(fence))
-               __assign_str(timeline, fence->ops->get_timeline_name(fence))
-               __entry->context = fence->context;
-               __entry->seqno = fence->seqno;
-
-               __assign_str(waiting_driver, f1->ops->get_driver_name(f1))
-               __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
-               __entry->waiting_context = f1->context;
-               __entry->waiting_seqno = f1->seqno;
-
-       ),
-
-       TP_printk("driver=%s timeline=%s context=%u seqno=%u "  \
-                 "waits on driver=%s timeline=%s context=%u seqno=%u",
-                 __get_str(driver), __get_str(timeline), __entry->context,
-                 __entry->seqno,
-                 __get_str(waiting_driver), __get_str(waiting_timeline),
-                 __entry->waiting_context, __entry->waiting_seqno)
-);
-
 DECLARE_EVENT_CLASS(dma_fence,
 
        TP_PROTO(struct dma_fence *fence),
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
new file mode 100644 (file)
index 0000000..f5024c5
--- /dev/null
@@ -0,0 +1,70 @@
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PREEMPTIRQ_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
+DECLARE_EVENT_CLASS(preemptirq_template,
+
+       TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+       TP_ARGS(ip, parent_ip),
+
+       TP_STRUCT__entry(
+               __field(u32, caller_offs)
+               __field(u32, parent_offs)
+       ),
+
+       TP_fast_assign(
+               __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+               __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+       ),
+
+       TP_printk("caller=%pF parent=%pF",
+                 (void *)((unsigned long)(_stext) + __entry->caller_offs),
+                 (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+#ifndef CONFIG_PROVE_LOCKING
+DEFINE_EVENT(preemptirq_template, irq_disable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, irq_enable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+DEFINE_EVENT(preemptirq_template, preempt_disable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_enable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+#endif
+
+#endif /* _TRACE_PREEMPTIRQ_H */
+
+#include <trace/define_trace.h>
+
+#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
+
+#endif
index ebe96796027aefc816cfa90d96dbf8215e41ab21..36cb50c111a64827ab5e03f750c941c1345973c8 100644 (file)
@@ -49,6 +49,7 @@ enum rxrpc_conn_trace {
        rxrpc_conn_put_client,
        rxrpc_conn_put_service,
        rxrpc_conn_queued,
+       rxrpc_conn_reap_service,
        rxrpc_conn_seen,
 };
 
@@ -138,10 +139,24 @@ enum rxrpc_rtt_rx_trace {
 
 enum rxrpc_timer_trace {
        rxrpc_timer_begin,
+       rxrpc_timer_exp_ack,
+       rxrpc_timer_exp_hard,
+       rxrpc_timer_exp_idle,
+       rxrpc_timer_exp_keepalive,
+       rxrpc_timer_exp_lost_ack,
+       rxrpc_timer_exp_normal,
+       rxrpc_timer_exp_ping,
+       rxrpc_timer_exp_resend,
        rxrpc_timer_expired,
        rxrpc_timer_init_for_reply,
        rxrpc_timer_init_for_send_reply,
+       rxrpc_timer_restart,
        rxrpc_timer_set_for_ack,
+       rxrpc_timer_set_for_hard,
+       rxrpc_timer_set_for_idle,
+       rxrpc_timer_set_for_keepalive,
+       rxrpc_timer_set_for_lost_ack,
+       rxrpc_timer_set_for_normal,
        rxrpc_timer_set_for_ping,
        rxrpc_timer_set_for_resend,
        rxrpc_timer_set_for_send,
@@ -150,6 +165,7 @@ enum rxrpc_timer_trace {
 enum rxrpc_propose_ack_trace {
        rxrpc_propose_ack_client_tx_end,
        rxrpc_propose_ack_input_data,
+       rxrpc_propose_ack_ping_for_keepalive,
        rxrpc_propose_ack_ping_for_lost_ack,
        rxrpc_propose_ack_ping_for_lost_reply,
        rxrpc_propose_ack_ping_for_params,
@@ -206,6 +222,7 @@ enum rxrpc_congest_change {
        EM(rxrpc_conn_put_client,               "PTc") \
        EM(rxrpc_conn_put_service,              "PTs") \
        EM(rxrpc_conn_queued,                   "QUE") \
+       EM(rxrpc_conn_reap_service,             "RPs") \
        E_(rxrpc_conn_seen,                     "SEE")
 
 #define rxrpc_client_traces \
@@ -296,16 +313,31 @@ enum rxrpc_congest_change {
 #define rxrpc_timer_traces \
        EM(rxrpc_timer_begin,                   "Begin ") \
        EM(rxrpc_timer_expired,                 "*EXPR*") \
+       EM(rxrpc_timer_exp_ack,                 "ExpAck") \
+       EM(rxrpc_timer_exp_hard,                "ExpHrd") \
+       EM(rxrpc_timer_exp_idle,                "ExpIdl") \
+       EM(rxrpc_timer_exp_keepalive,           "ExpKA ") \
+       EM(rxrpc_timer_exp_lost_ack,            "ExpLoA") \
+       EM(rxrpc_timer_exp_normal,              "ExpNml") \
+       EM(rxrpc_timer_exp_ping,                "ExpPng") \
+       EM(rxrpc_timer_exp_resend,              "ExpRsn") \
        EM(rxrpc_timer_init_for_reply,          "IniRpl") \
        EM(rxrpc_timer_init_for_send_reply,     "SndRpl") \
+       EM(rxrpc_timer_restart,                 "Restrt") \
        EM(rxrpc_timer_set_for_ack,             "SetAck") \
+       EM(rxrpc_timer_set_for_hard,            "SetHrd") \
+       EM(rxrpc_timer_set_for_idle,            "SetIdl") \
+       EM(rxrpc_timer_set_for_keepalive,       "KeepAl") \
+       EM(rxrpc_timer_set_for_lost_ack,        "SetLoA") \
+       EM(rxrpc_timer_set_for_normal,          "SetNml") \
        EM(rxrpc_timer_set_for_ping,            "SetPng") \
        EM(rxrpc_timer_set_for_resend,          "SetRTx") \
-       E_(rxrpc_timer_set_for_send,            "SetTx ")
+       E_(rxrpc_timer_set_for_send,            "SetSnd")
 
 #define rxrpc_propose_ack_traces \
        EM(rxrpc_propose_ack_client_tx_end,     "ClTxEnd") \
        EM(rxrpc_propose_ack_input_data,        "DataIn ") \
+       EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
        EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
        EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
        EM(rxrpc_propose_ack_ping_for_params,   "Params ") \
@@ -932,39 +964,47 @@ TRACE_EVENT(rxrpc_rtt_rx,
 
 TRACE_EVENT(rxrpc_timer,
            TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-                    ktime_t now, unsigned long now_j),
+                    unsigned long now),
 
-           TP_ARGS(call, why, now, now_j),
+           TP_ARGS(call, why, now),
 
            TP_STRUCT__entry(
                    __field(struct rxrpc_call *,                call            )
                    __field(enum rxrpc_timer_trace,             why             )
-                   __field_struct(ktime_t,                     now             )
-                   __field_struct(ktime_t,                     expire_at       )
-                   __field_struct(ktime_t,                     ack_at          )
-                   __field_struct(ktime_t,                     resend_at       )
-                   __field(unsigned long,                      now_j           )
-                   __field(unsigned long,                      timer           )
+                   __field(long,                               now             )
+                   __field(long,                               ack_at          )
+                   __field(long,                               ack_lost_at     )
+                   __field(long,                               resend_at       )
+                   __field(long,                               ping_at         )
+                   __field(long,                               expect_rx_by    )
+                   __field(long,                               expect_req_by   )
+                   __field(long,                               expect_term_by  )
+                   __field(long,                               timer           )
                             ),
 
            TP_fast_assign(
-                   __entry->call       = call;
-                   __entry->why        = why;
-                   __entry->now        = now;
-                   __entry->expire_at  = call->expire_at;
-                   __entry->ack_at     = call->ack_at;
-                   __entry->resend_at  = call->resend_at;
-                   __entry->now_j      = now_j;
-                   __entry->timer      = call->timer.expires;
+                   __entry->call               = call;
+                   __entry->why                = why;
+                   __entry->now                = now;
+                   __entry->ack_at             = call->ack_at;
+                   __entry->ack_lost_at        = call->ack_lost_at;
+                   __entry->resend_at          = call->resend_at;
+                   __entry->expect_rx_by       = call->expect_rx_by;
+                   __entry->expect_req_by      = call->expect_req_by;
+                   __entry->expect_term_by     = call->expect_term_by;
+                   __entry->timer              = call->timer.expires;
                           ),
 
-           TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+           TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
                      __entry->call,
                      __print_symbolic(__entry->why, rxrpc_timer_traces),
-                     ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
-                     ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
-                     ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
-                     __entry->timer - __entry->now_j)
+                     __entry->ack_at - __entry->now,
+                     __entry->ack_lost_at - __entry->now,
+                     __entry->resend_at - __entry->now,
+                     __entry->expect_rx_by - __entry->now,
+                     __entry->expect_req_by - __entry->now,
+                     __entry->expect_term_by - __entry->now,
+                     __entry->timer - __entry->now)
            );
 
 TRACE_EVENT(rxrpc_rx_lose,
@@ -1080,7 +1120,7 @@ TRACE_EVENT(rxrpc_congest,
                    memcpy(&__entry->sum, summary, sizeof(__entry->sum));
                           ),
 
-           TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+           TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
                      __entry->call,
                      __entry->ack_serial,
                      __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
index 306b31de519417790a50ab9dcb510faafd44cb30..bc01e06bc7167fb2557ccb94e21c58e55f9c8fdb 100644 (file)
@@ -116,9 +116,9 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
         * RUNNING (we will not have dequeued if state != RUNNING).
         */
        if (preempt)
-               return TASK_STATE_MAX;
+               return TASK_REPORT_MAX;
 
-       return task_state_index(p);
+       return 1 << task_state_index(p);
 }
 #endif /* CREATE_TRACE_POINTS */
 
@@ -164,7 +164,7 @@ TRACE_EVENT(sched_switch,
                                { 0x40, "P" }, { 0x80, "I" }) :
                  "R",
 
-               __entry->prev_state & TASK_STATE_MAX ? "+" : "",
+               __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
 
index ecbdbfe86eb6620350fe5eca369906cd3ea60823..8c153f68509e297225db7614f8e706479476bb9c 100644 (file)
@@ -486,20 +486,22 @@ TRACE_EVENT(svc_recv,
        TP_ARGS(rqst, status),
 
        TP_STRUCT__entry(
-               __field(struct sockaddr *, addr)
                __field(u32, xid)
                __field(int, status)
                __field(unsigned long, flags)
+               __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
        ),
 
        TP_fast_assign(
-               __entry->addr = (struct sockaddr *)&rqst->rq_addr;
                __entry->xid = status > 0 ? be32_to_cpu(rqst->rq_xid) : 0;
                __entry->status = status;
                __entry->flags = rqst->rq_flags;
+               memcpy(__get_dynamic_array(addr),
+                       &rqst->rq_addr, rqst->rq_addrlen);
        ),
 
-       TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s", __entry->addr,
+       TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s",
+                       (struct sockaddr *)__get_dynamic_array(addr),
                        __entry->xid, __entry->status,
                        show_rqstp_flags(__entry->flags))
 );
@@ -544,22 +546,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
        TP_ARGS(rqst, status),
 
        TP_STRUCT__entry(
-               __field(struct sockaddr *, addr)
                __field(u32, xid)
-               __field(int, dropme)
                __field(int, status)
                __field(unsigned long, flags)
+               __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
        ),
 
        TP_fast_assign(
-               __entry->addr = (struct sockaddr *)&rqst->rq_addr;
                __entry->xid = be32_to_cpu(rqst->rq_xid);
                __entry->status = status;
                __entry->flags = rqst->rq_flags;
+               memcpy(__get_dynamic_array(addr),
+                       &rqst->rq_addr, rqst->rq_addrlen);
        ),
 
        TP_printk("addr=%pIScp rq_xid=0x%08x status=%d flags=%s",
-               __entry->addr, __entry->xid,
+               (struct sockaddr *)__get_dynamic_array(addr),
+               __entry->xid,
                __entry->status, show_rqstp_flags(__entry->flags))
 );
 
index 466c09d882ad3e928447f687b39bebb139f0821f..78946640fe03962c7207dec66844a895c0481f1d 100644 (file)
@@ -91,6 +91,7 @@ TRACE_EVENT(thermal_zone_trip,
                show_tzt_type(__entry->trip_type))
 );
 
+#ifdef CONFIG_CPU_THERMAL
 TRACE_EVENT(thermal_power_cpu_get_power,
        TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
                size_t load_len, u32 dynamic_power, u32 static_power),
@@ -148,7 +149,9 @@ TRACE_EVENT(thermal_power_cpu_limit,
                __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
                __entry->power)
 );
+#endif /* CONFIG_CPU_THERMAL */
 
+#ifdef CONFIG_DEVFREQ_THERMAL
 TRACE_EVENT(thermal_power_devfreq_get_power,
        TP_PROTO(struct thermal_cooling_device *cdev,
                 struct devfreq_dev_status *status, unsigned long freq,
@@ -204,6 +207,7 @@ TRACE_EVENT(thermal_power_devfreq_limit,
                __get_str(type), __entry->freq, __entry->cdev_state,
                __entry->power)
 );
+#endif /* CONFIG_DEVFREQ_THERMAL */
 #endif /* _TRACE_THERMAL_H */
 
 /* This part must be outside protection */
index dc23cf03240348f54df97966bf9372d2684bc249..d70b53e65f4323cdb51dc41c18b890f2ae7e6f25 100644 (file)
@@ -134,6 +134,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_b
        TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
 );
 
+#ifdef CONFIG_MEMCG
 DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
 
        TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
@@ -147,6 +148,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_
 
        TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
 );
+#endif /* CONFIG_MEMCG */
 
 DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
 
@@ -172,6 +174,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end
        TP_ARGS(nr_reclaimed)
 );
 
+#ifdef CONFIG_MEMCG
 DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
 
        TP_PROTO(unsigned long nr_reclaimed),
@@ -185,6 +188,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
 
        TP_ARGS(nr_reclaimed)
 );
+#endif /* CONFIG_MEMCG */
 
 TRACE_EVENT(mm_shrink_slab_start,
        TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
index a7c8b452aab9c7fefaa3908a31d00214b24d5717..b8adf05c534e725d1e0e3b614181b008e2488f65 100644 (file)
@@ -148,7 +148,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
                     TP_ARGS(ptep, pteval))
 
 DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
-DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
 
 TRACE_EVENT(xen_mmu_set_pte_at,
            TP_PROTO(struct mm_struct *mm, unsigned long addr,
@@ -170,21 +169,6 @@ TRACE_EVENT(xen_mmu_set_pte_at,
                      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
        );
 
-TRACE_EVENT(xen_mmu_pte_clear,
-           TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
-           TP_ARGS(mm, addr, ptep),
-           TP_STRUCT__entry(
-                   __field(struct mm_struct *, mm)
-                   __field(unsigned long, addr)
-                   __field(pte_t *, ptep)
-                   ),
-           TP_fast_assign(__entry->mm = mm;
-                          __entry->addr = addr;
-                          __entry->ptep = ptep),
-           TP_printk("mm %p addr %lx ptep %p",
-                     __entry->mm, __entry->addr, __entry->ptep)
-       );
-
 TRACE_DEFINE_SIZEOF(pmdval_t);
 
 TRACE_EVENT(xen_mmu_set_pmd,
@@ -202,6 +186,24 @@ TRACE_EVENT(xen_mmu_set_pmd,
                      (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
        );
 
+#ifdef CONFIG_X86_PAE
+DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
+
+TRACE_EVENT(xen_mmu_pte_clear,
+           TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
+           TP_ARGS(mm, addr, ptep),
+           TP_STRUCT__entry(
+                   __field(struct mm_struct *, mm)
+                   __field(unsigned long, addr)
+                   __field(pte_t *, ptep)
+                   ),
+           TP_fast_assign(__entry->mm = mm;
+                          __entry->addr = addr;
+                          __entry->ptep = ptep),
+           TP_printk("mm %p addr %lx ptep %p",
+                     __entry->mm, __entry->addr, __entry->ptep)
+       );
+
 TRACE_EVENT(xen_mmu_pmd_clear,
            TP_PROTO(pmd_t *pmdp),
            TP_ARGS(pmdp),
@@ -211,6 +213,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
            TP_fast_assign(__entry->pmdp = pmdp),
            TP_printk("pmdp %p", __entry->pmdp)
        );
+#endif
 
 #if CONFIG_PGTABLE_LEVELS >= 4
 
index 73445ef07ddada2a20f7309baf9c1e832103f637..940b04772af801345a07687c0f920a5dd9ce4cfb 100644 (file)
@@ -76,7 +76,7 @@ struct bfs_super_block {
 #define BFS_FILEBLOCKS(ip) \
         ((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) -  le32_to_cpu((ip)->i_sblock))
 #define BFS_UNCLEAN(bfs_sb, sb)        \
-       ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY))
+       ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & SB_RDONLY))
 
 
 #endif /* _LINUX_BFS_FS_H */
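
Note: SB_RDONLY is the kernel-internal counterpart of the MS_RDONLY mount(2) flag; superblock flags are now tested with the SB_* names inside filesystem code. A one-line sketch with a hypothetical helper (the in-tree sb_rdonly() helper expresses the same check):

#include <linux/fs.h>

static inline bool bfs_sb_is_readonly(const struct super_block *sb)
{
	/* SB_* flags describe sb->s_flags; MS_* stay userspace-facing */
	return sb->s_flags & SB_RDONLY;
}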
index e880ae6434eed9eb29db99169c716c94c7cf30aa..4c223ab30293cd1e07248b960e213c7f22b778c5 100644 (file)
@@ -262,7 +262,7 @@ union bpf_attr {
                __u32           kern_version;   /* checked when prog_type=kprobe */
                __u32           prog_flags;
                char            prog_name[BPF_OBJ_NAME_LEN];
-               __u32           prog_target_ifindex;    /* ifindex of netdev to prep for */
+               __u32           prog_ifindex;   /* ifindex of netdev to prep for */
        };
 
        struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -897,10 +897,6 @@ enum sk_action {
 
 #define BPF_TAG_SIZE   8
 
-enum bpf_prog_status {
-       BPF_PROG_STATUS_DEV_BOUND       = (1 << 0),
-};
-
 struct bpf_prog_info {
        __u32 type;
        __u32 id;
@@ -914,8 +910,6 @@ struct bpf_prog_info {
        __u32 nr_map_ids;
        __aligned_u64 map_ids;
        char name[BPF_OBJ_NAME_LEN];
-       __u32 ifindex;
-       __u32 status;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
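
Note: with this rename, userspace requests device-bound (offloaded) loading by filling prog_ifindex at BPF_PROG_LOAD time. A hedged sketch of such a loader; the instruction buffer, error handling and the helper name are assumptions of this illustration:

#include <linux/bpf.h>
#include <net/if.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long load_offloaded(const struct bpf_insn *insns, unsigned int insn_cnt,
			   const char *dev)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_XDP;
	attr.insn_cnt = insn_cnt;
	attr.insns = (__u64)(unsigned long)insns;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.prog_ifindex = if_nametoindex(dev);	/* bind the program to this netdev */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}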
index 33eabbb8ada11e5376364c5f3db99f90a97ddb10..9529867717a866cc7d02a4906165d0bd59faced0 100644 (file)
@@ -8,4 +8,28 @@
 #define KCOV_ENABLE                    _IO('c', 100)
 #define KCOV_DISABLE                   _IO('c', 101)
 
+enum {
+       /*
+        * Tracing coverage collection mode.
+        * Covered PCs are collected in a per-task buffer.
+        * In new KCOV version the mode is chosen by calling
+        * ioctl(fd, KCOV_ENABLE, mode). In older versions the mode argument
+        * was supposed to be 0 in such a call. So, for reasons of backward
+        * compatibility, we have chosen the value KCOV_TRACE_PC to be 0.
+        */
+       KCOV_TRACE_PC = 0,
+       /* Collecting comparison operands mode. */
+       KCOV_TRACE_CMP = 1,
+};
+
+/*
+ * The format for the types of collected comparisons.
+ *
+ * Bit 0 shows whether one of the arguments is a compile-time constant.
+ * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
+ */
+#define KCOV_CMP_CONST          (1 << 0)
+#define KCOV_CMP_SIZE(n)        ((n) << 1)
+#define KCOV_CMP_MASK           KCOV_CMP_SIZE(3)
+
 #endif /* _LINUX_KCOV_IOCTLS_H */
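
Note: the comparison-collection mode is driven entirely from userspace through the existing kcov debugfs node. A hedged sketch of how a fuzzer might consume it; the device path, buffer size and lack of error handling are assumptions of this illustration, not part of the header:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kcov.h>

#define COVER_SIZE (64 << 10)	/* 64K 64-bit words, assumed */

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	uint64_t *cover, n, i;

	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	cover = mmap(NULL, COVER_SIZE * sizeof(uint64_t),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP);
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

	read(-1, NULL, 0);	/* the syscall being fuzzed */

	/* Each record is four u64s: type, arg1, arg2, PC. */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("size=%llu%s %llx vs %llx\n",
		       1ULL << ((cover[1 + 4 * i] & KCOV_CMP_MASK) >> 1),
		       (cover[1 + 4 * i] & KCOV_CMP_CONST) ? " const" : "",
		       (unsigned long long)cover[2 + 4 * i],
		       (unsigned long long)cover[3 + 4 * i]);

	ioctl(fd, KCOV_DISABLE, 0);
	close(fd);
	return 0;
}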
index 9d4afea308a434ceaea242c28ff2a00eee9a3287..9335d92c14a405311b5993f4dd459120a0cb0e85 100644 (file)
@@ -59,6 +59,7 @@ enum rxrpc_cmsg_type {
        RXRPC_EXCLUSIVE_CALL    = 10,   /* s-: Call should be on exclusive connection */
        RXRPC_UPGRADE_SERVICE   = 11,   /* s-: Request service upgrade for client call */
        RXRPC_TX_LENGTH         = 12,   /* s-: Total length of Tx data */
+       RXRPC_SET_CALL_TIMEOUT  = 13,   /* s-: Set one or more call timeouts */
        RXRPC__SUPPORTED
 };
 
index 2f6fb0dd613c09198c830c9059ba5f63e62f5de1..286e8d6a8e98a17a41fc2ffe380b9f68ba1704c0 100644 (file)
@@ -26,9 +26,9 @@ struct ipv6_sr_hdr {
        __u8    hdrlen;
        __u8    type;
        __u8    segments_left;
-       __u8    first_segment;
+       __u8    first_segment; /* Represents the last_entry field of SRH */
        __u8    flags;
-       __u16   reserved;
+       __u16   tag;
 
        struct in6_addr segments[0];
 };
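
Note: first_segment now documents its role as the SRH "last entry" index, and the former reserved word becomes the opaque tag field. A hypothetical sketch of filling the renamed fields for a header carrying nsegs SIDs (allocation and SID copying elided; field semantics follow the SRH specification, not this diff):

#include <linux/seg6.h>

static void init_srh(struct ipv6_sr_hdr *srh, unsigned int nsegs)
{
	srh->hdrlen = 2 * nsegs;	/* 8-octet units, excluding the first 8 bytes */
	srh->type = 4;			/* SRv6 routing header type */
	srh->first_segment = nsegs - 1;	/* index of the last entry in segments[] */
	srh->segments_left = nsegs - 1;
	srh->tag = 0;			/* opaque tag, previously "reserved" */
}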
index 14cd7dc5a187c85fea9d98089bee1fcf785971ed..0b4dd54f3d1eaeb14fa7ae5b066c4db260ab196c 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /* AF_VSOCK sock_diag(7) interface for querying open sockets */
 
 #ifndef _UAPI__VM_SOCKETS_DIAG_H__
diff --git a/include/uapi/linux/wmi.h b/include/uapi/linux/wmi.h
new file mode 100644 (file)
index 0000000..7a92e9e
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ *  User API methods for ACPI-WMI mapping driver
+ *
+ *  Copyright (C) 2017 Dell, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#ifndef _UAPI_LINUX_WMI_H
+#define _UAPI_LINUX_WMI_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* WMI bus will filter all WMI vendor driver requests through this IOC */
+#define WMI_IOC 'W'
+
+/* All ioctl requests through WMI should declare their size followed by
+ * relevant data objects
+ */
+struct wmi_ioctl_buffer {
+       __u64   length;
+       __u8    data[];
+};
+
+/* This structure may be modified by the firmware when we enter
+ * system management mode through SMM, hence the volatiles
+ */
+struct calling_interface_buffer {
+       __u16 cmd_class;
+       __u16 cmd_select;
+       volatile __u32 input[4];
+       volatile __u32 output[4];
+} __packed;
+
+struct dell_wmi_extensions {
+       __u32 argattrib;
+       __u32 blength;
+       __u8 data[];
+} __packed;
+
+struct dell_wmi_smbios_buffer {
+       __u64 length;
+       struct calling_interface_buffer std;
+       struct dell_wmi_extensions      ext;
+} __packed;
+
+/* Whitelisted smbios class/select commands */
+#define CLASS_TOKEN_READ       0
+#define CLASS_TOKEN_WRITE      1
+#define SELECT_TOKEN_STD       0
+#define SELECT_TOKEN_BAT       1
+#define SELECT_TOKEN_AC                2
+#define CLASS_FLASH_INTERFACE  7
+#define SELECT_FLASH_INTERFACE 3
+#define CLASS_ADMIN_PROP       10
+#define SELECT_ADMIN_PROP      3
+#define CLASS_INFO             17
+#define SELECT_RFKILL          11
+#define SELECT_APP_REGISTRATION        3
+#define SELECT_DOCK            22
+
+/* whitelisted tokens */
+#define CAPSULE_EN_TOKEN       0x0461
+#define CAPSULE_DIS_TOKEN      0x0462
+#define WSMT_EN_TOKEN          0x04EC
+#define WSMT_DIS_TOKEN         0x04ED
+
+/* Dell SMBIOS calling IOCTL command used by dell-smbios-wmi */
+#define DELL_WMI_SMBIOS_CMD    _IOWR(WMI_IOC, 0, struct dell_wmi_smbios_buffer)
+
+#endif
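
Note: a hypothetical userspace sketch of the new ioctl. The character device path, the use of input[0] as the token location, and the minimal buffer size are assumptions of this illustration, not guarantees made by the header:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/wmi.h>

int main(void)
{
	struct dell_wmi_smbios_buffer *buf;
	int fd = open("/dev/wmi/dell-smbios", O_RDWR);	/* assumed node */

	buf = calloc(1, sizeof(*buf));			/* driver may require a larger buffer */
	buf->length = sizeof(*buf);
	buf->std.cmd_class = CLASS_TOKEN_READ;
	buf->std.cmd_select = SELECT_TOKEN_STD;
	buf->std.input[0] = CAPSULE_EN_TOKEN;		/* token location, assumed */

	if (fd >= 0 && ioctl(fd, DELL_WMI_SMBIOS_CMD, buf) == 0)
		printf("smbios ret=%u value=%u\n",
		       buf->std.output[0], buf->std.output[1]);

	free(buf);
	if (fd >= 0)
		close(fd);
	return 0;
}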
diff --git a/include/video/iga.h b/include/video/iga.h
deleted file mode 100644 (file)
index 83ca184..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: iga.h,v 1.2 1999/09/11 22:56:31 zaitcev Exp $
- * iga1682.h: Sparc/PCI iga1682 driver constants etc.
- *
- * Copyleft 1998 V. Roganov and G. Raiko
- */
-
-#ifndef _IGA1682_H
-#define _IGA1682_H 1
-
-#define IGA_ATTR_CTL                   0x3C0
-#define   IGA_IDX_VGA_OVERSCAN         0x11
-#define DAC_W_INDEX                     0x03C8
-#define DAC_DATA                        0x03C9
-#define IGA_EXT_CNTRL                   0x3CE
-#define   IGA_IDX_EXT_BUS_CNTL          0x30
-#define     MEM_SIZE_ALIAS              0x3
-#define     MEM_SIZE_1M                 0x0
-#define     MEM_SIZE_2M                 0x1
-#define     MEM_SIZE_4M                 0x2
-#define     MEM_SIZE_RESERVED           0x3
-#define   IGA_IDX_OVERSCAN_COLOR        0x58
-#define   IGA_IDX_EXT_MEM_2             0x72
-
-#endif /* !(_IGA1682_H) */
index 7d5a6fbac56a0fe0151b104b079770efd0a32e5d..2934249fba46746253b89d14f581547ada3d0bad 100644 (file)
@@ -283,19 +283,6 @@ config CROSS_MEMORY_ATTACH
          to directly read from or write to another process' address space.
          See the man page for more details.
 
-config FHANDLE
-       bool "open by fhandle syscalls" if EXPERT
-       select EXPORTFS
-       default y
-       help
-         If you say Y here, a user level program will be able to map
-         file names to handle and then later use the handle for
-         different file system operations. This is useful in implementing
-         userspace file servers, which now track files using handles instead
-         of names. The handle would remain the same even if file names
-         get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2)
-         syscalls.
-
 config USELIB
        bool "uselib syscall"
        def_bool ALPHA || M68K || SPARC || X86_32 || IA32_EMULATION
@@ -883,18 +870,6 @@ config SOCK_CGROUP_DATA
 
 endif # CGROUPS
 
-config CHECKPOINT_RESTORE
-       bool "Checkpoint/restore support" if EXPERT
-       select PROC_CHILDREN
-       default n
-       help
-         Enables additional kernel features in a sake of checkpoint/restore.
-         In particular it adds auxiliary prctl codes to setup process text,
-         data and heap segment sizes, and a few additional /proc filesystem
-         entries.
-
-         If unsure, say N here.
-
 menuconfig NAMESPACES
        bool "Namespaces support" if EXPERT
        depends on MULTIUSER
@@ -1163,6 +1138,19 @@ config SYSCTL_SYSCALL
 
          If unsure say N here.
 
+config FHANDLE
+       bool "open by fhandle syscalls" if EXPERT
+       select EXPORTFS
+       default y
+       help
+         If you say Y here, a user level program will be able to map
+         file names to handle and then later use the handle for
+         different file system operations. This is useful in implementing
+         userspace file servers, which now track files using handles instead
+         of names. The handle would remain the same even if file names
+         get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2)
+         syscalls.
+
 config POSIX_TIMERS
        bool "Posix Clocks & timers" if EXPERT
        default y
@@ -1180,54 +1168,6 @@ config POSIX_TIMERS
 
          If unsure say y.
 
-config KALLSYMS
-        bool "Load all symbols for debugging/ksymoops" if EXPERT
-        default y
-        help
-          Say Y here to let the kernel print out symbolic crash information and
-          symbolic stack backtraces. This increases the size of the kernel
-          somewhat, as all symbols have to be loaded into the kernel image.
-
-config KALLSYMS_ALL
-       bool "Include all symbols in kallsyms"
-       depends on DEBUG_KERNEL && KALLSYMS
-       help
-          Normally kallsyms only contains the symbols of functions for nicer
-          OOPS messages and backtraces (i.e., symbols from the text and inittext
-          sections). This is sufficient for most cases. And only in very rare
-          cases (e.g., when a debugger is used) all symbols are required (e.g.,
-          names of variables from the data sections, etc).
-
-          This option makes sure that all symbols are loaded into the kernel
-          image (i.e., symbols from all sections) in cost of increased kernel
-          size (depending on the kernel configuration, it may be 300KiB or
-          something like this).
-
-          Say N unless you really need all symbols.
-
-config KALLSYMS_ABSOLUTE_PERCPU
-       bool
-       depends on KALLSYMS
-       default X86_64 && SMP
-
-config KALLSYMS_BASE_RELATIVE
-       bool
-       depends on KALLSYMS
-       default !IA64 && !(TILE && 64BIT)
-       help
-         Instead of emitting them as absolute values in the native word size,
-         emit the symbol references in the kallsyms table as 32-bit entries,
-         each containing a relative value in the range [base, base + U32_MAX]
-         or, when KALLSYMS_ABSOLUTE_PERCPU is in effect, each containing either
-         an absolute value in the range [0, S32_MAX] or a relative value in the
-         range [base, base + S32_MAX], where base is the lowest relative symbol
-         address encountered in the image.
-
-         On 64-bit builds, this reduces the size of the address table by 50%,
-         but more importantly, it results in entries whose values are build
-         time constants, and no relocation pass is required at runtime to fix
-         up the entries based on the runtime load address of the kernel.
-
 config PRINTK
        default y
        bool "Enable support for printk" if EXPERT
@@ -1339,16 +1279,6 @@ config EVENTFD
 
          If unsure, say Y.
 
-# syscall, maps, verifier
-config BPF_SYSCALL
-       bool "Enable bpf() system call"
-       select ANON_INODES
-       select BPF
-       default n
-       help
-         Enable the bpf() system call that allows to manipulate eBPF
-         programs and maps via file descriptors.
-
 config SHMEM
        bool "Use full shmem filesystem" if EXPERT
        default y
@@ -1378,14 +1308,6 @@ config ADVISE_SYSCALLS
          applications use these syscalls, you can disable this option to save
          space.
 
-config USERFAULTFD
-       bool "Enable userfaultfd() system call"
-       select ANON_INODES
-       depends on MMU
-       help
-         Enable the userfaultfd() system call that allows to intercept and
-         handle page faults in userland.
-
 config MEMBARRIER
        bool "Enable membarrier() system call" if EXPERT
        default y
@@ -1398,6 +1320,86 @@ config MEMBARRIER
 
          If unsure, say Y.
 
+config CHECKPOINT_RESTORE
+       bool "Checkpoint/restore support" if EXPERT
+       select PROC_CHILDREN
+       default n
+       help
+         Enables additional kernel features in a sake of checkpoint/restore.
+         In particular it adds auxiliary prctl codes to setup process text,
+         data and heap segment sizes, and a few additional /proc filesystem
+         entries.
+
+         If unsure, say N here.
+
+config KALLSYMS
+        bool "Load all symbols for debugging/ksymoops" if EXPERT
+        default y
+        help
+          Say Y here to let the kernel print out symbolic crash information and
+          symbolic stack backtraces. This increases the size of the kernel
+          somewhat, as all symbols have to be loaded into the kernel image.
+
+config KALLSYMS_ALL
+       bool "Include all symbols in kallsyms"
+       depends on DEBUG_KERNEL && KALLSYMS
+       help
+          Normally kallsyms only contains the symbols of functions for nicer
+          OOPS messages and backtraces (i.e., symbols from the text and inittext
+          sections). This is sufficient for most cases. And only in very rare
+          cases (e.g., when a debugger is used) all symbols are required (e.g.,
+          names of variables from the data sections, etc).
+
+          This option makes sure that all symbols are loaded into the kernel
+          image (i.e., symbols from all sections) in cost of increased kernel
+          size (depending on the kernel configuration, it may be 300KiB or
+          something like this).
+
+          Say N unless you really need all symbols.
+
+config KALLSYMS_ABSOLUTE_PERCPU
+       bool
+       depends on KALLSYMS
+       default X86_64 && SMP
+
+config KALLSYMS_BASE_RELATIVE
+       bool
+       depends on KALLSYMS
+       default !IA64 && !(TILE && 64BIT)
+       help
+         Instead of emitting them as absolute values in the native word size,
+         emit the symbol references in the kallsyms table as 32-bit entries,
+         each containing a relative value in the range [base, base + U32_MAX]
+         or, when KALLSYMS_ABSOLUTE_PERCPU is in effect, each containing either
+         an absolute value in the range [0, S32_MAX] or a relative value in the
+         range [base, base + S32_MAX], where base is the lowest relative symbol
+         address encountered in the image.
+
+         On 64-bit builds, this reduces the size of the address table by 50%,
+         but more importantly, it results in entries whose values are build
+         time constants, and no relocation pass is required at runtime to fix
+         up the entries based on the runtime load address of the kernel.
+
+# end of the "standard kernel features (expert users)" menu
+
+# syscall, maps, verifier
+config BPF_SYSCALL
+       bool "Enable bpf() system call"
+       select ANON_INODES
+       select BPF
+       default n
+       help
+         Enable the bpf() system call that allows to manipulate eBPF
+         programs and maps via file descriptors.
+
+config USERFAULTFD
+       bool "Enable userfaultfd() system call"
+       select ANON_INODES
+       depends on MMU
+       help
+         Enable the userfaultfd() system call that allows to intercept and
+         handle page faults in userland.
+
 config EMBEDDED
        bool "Embedded system"
        option allnoconfig_y
index 7046feffef6b5852102a8c021afcaeb8420d3a74..7e99a00389429d79281ac2663a6fb7a4bedaef1d 100644 (file)
@@ -109,7 +109,7 @@ static void __init free_hash(void)
        }
 }
 
-static long __init do_utime(char *filename, time_t mtime)
+static long __init do_utime(char *filename, time64_t mtime)
 {
        struct timespec64 t[2];
 
@@ -125,10 +125,10 @@ static __initdata LIST_HEAD(dir_list);
 struct dir_entry {
        struct list_head list;
        char *name;
-       time_t mtime;
+       time64_t mtime;
 };
 
-static void __init dir_add(const char *name, time_t mtime)
+static void __init dir_add(const char *name, time64_t mtime)
 {
        struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL);
        if (!de)
@@ -150,7 +150,7 @@ static void __init dir_utime(void)
        }
 }
 
-static __initdata time_t mtime;
+static __initdata time64_t mtime;
 
 /* cpio header parsing */
 
@@ -177,7 +177,7 @@ static void __init parse_header(char *s)
        uid = parsed[2];
        gid = parsed[3];
        nlink = parsed[4];
-       mtime = parsed[5];
+       mtime = parsed[5]; /* breaks in y2106 */
        body_len = parsed[6];
        major = parsed[7];
        minor = parsed[8];
index 859a786f7c0abfc32d7a7c873bc047f92585f84c..dfec3809e7404f9658d51a20aae3869cd7aab9c3 100644 (file)
@@ -562,7 +562,6 @@ asmlinkage __visible void __init start_kernel(void)
         * kmem_cache_init()
         */
        setup_log_buf(0);
-       pidhash_init();
        vfs_caches_init_early();
        sort_main_extable();
        trap_init();
@@ -669,7 +668,7 @@ asmlinkage __visible void __init start_kernel(void)
        if (late_time_init)
                late_time_init();
        calibrate_delay();
-       pidmap_init();
+       pid_idr_init();
        anon_vma_init();
 #ifdef CONFIG_X86
        if (efi_enabled(EFI_RUNTIME_SERVICES))
index 5606341e9efd5510c9b8ede9eb01b12b9965e6c9..bfb4e3f4955e0caed1ae3d5b404f55ccdacf35a0 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <generated/compile.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/uts.h>
 #include <linux/utsname.h>
 #include <generated/utsrelease.h>
index d240256263103f89972ab5e62201549a7074a829..9649ecd8a73a704fe07e3d678ac96723d33aec05 100644 (file)
@@ -331,7 +331,7 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type,
                         void *data)
 {
        struct ipc_namespace *ns;
-       if (flags & MS_KERNMOUNT) {
+       if (flags & SB_KERNMOUNT) {
                ns = data;
                data = NULL;
        } else {
index a5cff0e109ab5a56fbbd3286cf96e625e99c1b53..87bd38f38dc328529ef2f6854a7a3d1e945bd9a0 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -515,6 +515,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        sma->sem_nsems = nsems;
        sma->sem_ctime = ktime_get_real_seconds();
 
+       /* ipc_addid() locks sma upon success. */
        retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (retval < 0) {
                call_rcu(&sma->sem_perm.rcu, sem_rcu_free);
index 7733d768666d557ca08e66ca28986f348b8d6b8b..7acda23430aa04c14a5466946cc141c60f5627a5 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -601,6 +601,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        shp->shm_file = file;
        shp->shm_creator = current;
 
+       /* ipc_addid() locks shp upon success. */
        error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (error < 0)
                goto no_id;
index 79b30eee32cd857c5dae1a71d1cdf37b34b07d0a..ff045fec8d835549d35faf54742be3f15f1b356a 100644 (file)
@@ -116,13 +116,16 @@ int ipc_init_ids(struct ipc_ids *ids)
        int err;
        ids->in_use = 0;
        ids->seq = 0;
-       ids->next_id = -1;
        init_rwsem(&ids->rwsem);
        err = rhashtable_init(&ids->key_ht, &ipc_kht_params);
        if (err)
                return err;
        idr_init(&ids->ipcs_idr);
        ids->tables_initialized = true;
+       ids->max_id = -1;
+#ifdef CONFIG_CHECKPOINT_RESTORE
+       ids->next_id = -1;
+#endif
        return 0;
 }
 
@@ -186,41 +189,51 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
        return NULL;
 }
 
-/**
- * ipc_get_maxid - get the last assigned id
- * @ids: ipc identifier set
- *
- * Called with ipc_ids.rwsem held.
+#ifdef CONFIG_CHECKPOINT_RESTORE
+/*
+ * Specify desired id for next allocated IPC object.
  */
-int ipc_get_maxid(struct ipc_ids *ids)
+#define ipc_idr_alloc(ids, new)                                                \
+       idr_alloc(&(ids)->ipcs_idr, (new),                              \
+                 (ids)->next_id < 0 ? 0 : ipcid_to_idx((ids)->next_id),\
+                 0, GFP_NOWAIT)
+
+static inline int ipc_buildid(int id, struct ipc_ids *ids,
+                             struct kern_ipc_perm *new)
 {
-       struct kern_ipc_perm *ipc;
-       int max_id = -1;
-       int total, id;
+       if (ids->next_id < 0) { /* default, behave as !CHECKPOINT_RESTORE */
+               new->seq = ids->seq++;
+               if (ids->seq > IPCID_SEQ_MAX)
+                       ids->seq = 0;
+       } else {
+               new->seq = ipcid_to_seqx(ids->next_id);
+               ids->next_id = -1;
+       }
 
-       if (ids->in_use == 0)
-               return -1;
+       return SEQ_MULTIPLIER * new->seq + id;
+}
 
-       if (ids->in_use == IPCMNI)
-               return IPCMNI - 1;
+#else
+#define ipc_idr_alloc(ids, new)                                        \
+       idr_alloc(&(ids)->ipcs_idr, (new), 0, 0, GFP_NOWAIT)
 
-       /* Look for the last assigned id */
-       total = 0;
-       for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
-               ipc = idr_find(&ids->ipcs_idr, id);
-               if (ipc != NULL) {
-                       max_id = id;
-                       total++;
-               }
-       }
-       return max_id;
+static inline int ipc_buildid(int id, struct ipc_ids *ids,
+                             struct kern_ipc_perm *new)
+{
+       new->seq = ids->seq++;
+       if (ids->seq > IPCID_SEQ_MAX)
+               ids->seq = 0;
+
+       return SEQ_MULTIPLIER * new->seq + id;
 }
 
+#endif /* CONFIG_CHECKPOINT_RESTORE */
+
 /**
  * ipc_addid - add an ipc identifier
  * @ids: ipc identifier set
  * @new: new ipc permission set
- * @size: limit for the number of used ids
+ * @limit: limit for the number of used ids
  *
  * Add an entry 'new' to the ipc ids idr. The permissions object is
  * initialised and the first free entry is set up and the id assigned
@@ -229,17 +242,16 @@ int ipc_get_maxid(struct ipc_ids *ids)
  *
  * Called with writer ipc_ids.rwsem held.
  */
-int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
+int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit)
 {
        kuid_t euid;
        kgid_t egid;
        int id, err;
-       int next_id = ids->next_id;
 
-       if (size > IPCMNI)
-               size = IPCMNI;
+       if (limit > IPCMNI)
+               limit = IPCMNI;
 
-       if (!ids->tables_initialized || ids->in_use >= size)
+       if (!ids->tables_initialized || ids->in_use >= limit)
                return -ENOSPC;
 
        idr_preload(GFP_KERNEL);
@@ -254,9 +266,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
        new->cuid = new->uid = euid;
        new->gid = new->cgid = egid;
 
-       id = idr_alloc(&ids->ipcs_idr, new,
-                      (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
-                      GFP_NOWAIT);
+       id = ipc_idr_alloc(ids, new);
        idr_preload_end();
 
        if (id >= 0 && new->key != IPC_PRIVATE) {
@@ -274,17 +284,11 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
        }
 
        ids->in_use++;
+       if (id > ids->max_id)
+               ids->max_id = id;
 
-       if (next_id < 0) {
-               new->seq = ids->seq++;
-               if (ids->seq > IPCID_SEQ_MAX)
-                       ids->seq = 0;
-       } else {
-               new->seq = ipcid_to_seqx(next_id);
-               ids->next_id = -1;
-       }
+       new->id = ipc_buildid(id, ids, new);
 
-       new->id = ipc_buildid(id, new->seq);
        return id;
 }
 
@@ -429,6 +433,15 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
        ipc_kht_remove(ids, ipcp);
        ids->in_use--;
        ipcp->deleted = true;
+
+       if (unlikely(lid == ids->max_id)) {
+               do {
+                       lid--;
+                       if (lid == -1)
+                               break;
+               } while (!idr_find(&ids->ipcs_idr, lid));
+               ids->max_id = lid;
+       }
 }
 
 /**
index 579112d90016ce5b07635555fca6c4b1f60fe84e..89b8ec176fc4c41aae00360f08a833532903577a 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/unistd.h>
 #include <linux/err.h>
+#include <linux/ipc_namespace.h>
 
 #define SEQ_MULTIPLIER (IPCMNI)
 
@@ -99,9 +100,6 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 /* must be called with ids->rwsem acquired for writing */
 int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
 
-/* must be called with ids->rwsem acquired for reading */
-int ipc_get_maxid(struct ipc_ids *);
-
 /* must be called with both locks acquired. */
 void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *);
 
@@ -111,6 +109,23 @@ void ipc_set_key_private(struct ipc_ids *, struct kern_ipc_perm *);
 /* must be called with ipcp locked */
 int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
 
+/**
+ * ipc_get_maxid - get the last assigned id
+ * @ids: ipc identifier set
+ *
+ * Called with ipc_ids.rwsem held for reading.
+ */
+static inline int ipc_get_maxid(struct ipc_ids *ids)
+{
+       if (ids->in_use == 0)
+               return -1;
+
+       if (ids->in_use == IPCMNI)
+               return IPCMNI - 1;
+
+       return ids->max_id;
+}
+
 /*
  * For allocation that need to be freed by RCU.
  * Objects are reference counted, they start with reference count 1.
@@ -146,11 +161,6 @@ extern struct msg_msg *load_msg(const void __user *src, size_t len);
 extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst);
 extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
 
-static inline int ipc_buildid(int id, int seq)
-{
-       return SEQ_MULTIPLIER * seq + id;
-}
-
 static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int uid)
 {
        return uid / SEQ_MULTIPLIER != ipcp->seq;
index 2816feb38be16a0137bc7aa351606f24466c55e1..68ec884440b75da08824249db74bb992f6d938ce 100644 (file)
@@ -14,8 +14,9 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
        struct net *net = current->nsproxy->net_ns;
        struct bpf_dev_offload *offload;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
+       if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
+           attr->prog_type != BPF_PROG_TYPE_XDP)
+               return -EINVAL;
 
        if (attr->prog_flags)
                return -EINVAL;
@@ -28,7 +29,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
        init_waitqueue_head(&offload->verifier_done);
 
        rtnl_lock();
-       offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex);
+       offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
        if (!offload->netdev) {
                rtnl_unlock();
                kfree(offload);
@@ -85,6 +86,10 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
        struct bpf_dev_offload *offload = prog->aux->offload;
        struct netdev_bpf data = {};
 
+       /* Caution - if netdev is destroyed before the program, this function
+        * will be called twice.
+        */
+
        data.offload.prog = prog;
 
        if (offload->verifier_running)
@@ -144,18 +149,6 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
        return bpf_prog_offload_translate(prog);
 }
 
-u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
-{
-       struct bpf_dev_offload *offload = prog->aux->offload;
-       u32 ifindex;
-
-       rtnl_lock();
-       ifindex = offload->netdev ? offload->netdev->ifindex : 0;
-       rtnl_unlock();
-
-       return ifindex;
-}
-
 const struct bpf_prog_ops bpf_offload_prog_ops = {
 };
 
@@ -169,6 +162,10 @@ static int bpf_offload_notification(struct notifier_block *notifier,
 
        switch (event) {
        case NETDEV_UNREGISTER:
+               /* ignore namespace changes */
+               if (netdev->reg_state != NETREG_UNREGISTERING)
+                       break;
+
                list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
                                         offloads) {
                        if (offload->netdev == netdev)
index 09badc37e86467bdef5923c52b4448b9eccc1d18..2c4cfeaa8d5e785f16758be08cb8a462766363d9 100644 (file)
@@ -1057,22 +1057,23 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
 
-static bool bpf_prog_can_attach(struct bpf_prog *prog,
-                               enum bpf_prog_type *attach_type,
-                               struct net_device *netdev)
+static bool bpf_prog_get_ok(struct bpf_prog *prog,
+                           enum bpf_prog_type *attach_type, bool attach_drv)
 {
-       struct bpf_dev_offload *offload = prog->aux->offload;
+       /* not an attachment, just a refcount inc, always allow */
+       if (!attach_type)
+               return true;
 
        if (prog->type != *attach_type)
                return false;
-       if (offload && offload->netdev != netdev)
+       if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
                return false;
 
        return true;
 }
 
 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
-                                      struct net_device *netdev)
+                                      bool attach_drv)
 {
        struct fd f = fdget(ufd);
        struct bpf_prog *prog;
@@ -1080,7 +1081,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
        prog = ____bpf_prog_get(f);
        if (IS_ERR(prog))
                return prog;
-       if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) {
+       if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
                prog = ERR_PTR(-EINVAL);
                goto out;
        }
@@ -1093,23 +1094,13 @@ out:
 
 struct bpf_prog *bpf_prog_get(u32 ufd)
 {
-       return __bpf_prog_get(ufd, NULL, NULL);
+       return __bpf_prog_get(ufd, NULL, false);
 }
 
-struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
-{
-       struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL);
-
-       if (!IS_ERR(prog))
-               trace_bpf_prog_get_type(prog);
-       return prog;
-}
-EXPORT_SYMBOL_GPL(bpf_prog_get_type);
-
 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
-                                      struct net_device *netdev)
+                                      bool attach_drv)
 {
-       struct bpf_prog *prog = __bpf_prog_get(ufd, &type, netdev);
+       struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);
 
        if (!IS_ERR(prog))
                trace_bpf_prog_get_type(prog);
@@ -1118,7 +1109,7 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
 
 /* last field in 'union bpf_attr' used by this command */
-#define        BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex
+#define        BPF_PROG_LOAD_LAST_FIELD prog_ifindex
 
 static int bpf_prog_load(union bpf_attr *attr)
 {
@@ -1181,7 +1172,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        atomic_set(&prog->aux->refcnt, 1);
        prog->gpl_compatible = is_gpl ? 1 : 0;
 
-       if (attr->prog_target_ifindex) {
+       if (attr->prog_ifindex) {
                err = bpf_prog_offload_init(prog, attr);
                if (err)
                        goto free_prog;
@@ -1625,11 +1616,6 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                        return -EFAULT;
        }
 
-       if (bpf_prog_is_dev_bound(prog->aux)) {
-               info.status |= BPF_PROG_STATUS_DEV_BOUND;
-               info.ifindex = bpf_prog_offload_ifindex(prog);
-       }
-
 done:
        if (copy_to_user(uinfo, &info, info_len) ||
            put_user(info_len, &uattr->info.info_len))
index dd54d20ace2ff7bc3aa942f4a08e08dadfb1a4e6..d4593571c4049b8d046f53f81f8e17911a21e0c9 100644 (file)
@@ -1384,13 +1384,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                if (type != expected_type)
                        goto err_type;
        } else if (arg_type == ARG_PTR_TO_MEM ||
+                  arg_type == ARG_PTR_TO_MEM_OR_NULL ||
                   arg_type == ARG_PTR_TO_UNINIT_MEM) {
                expected_type = PTR_TO_STACK;
                /* One exception here. In case function allows for NULL to be
                 * passed in as argument, it's a SCALAR_VALUE type. Final test
                 * happens during stack boundary checking.
                 */
-               if (register_is_null(*reg))
+               if (register_is_null(*reg) &&
+                   arg_type == ARG_PTR_TO_MEM_OR_NULL)
                        /* final test in check_stack_boundary() */;
                else if (!type_is_pkt_pointer(type) &&
                         type != PTR_TO_MAP_VALUE &&
@@ -3825,6 +3827,7 @@ static int do_check(struct bpf_verifier_env *env)
                        return err;
 
                regs = cur_regs(env);
+               env->insn_aux_data[insn_idx].seen = true;
                if (class == BPF_ALU || class == BPF_ALU64) {
                        err = check_alu_op(env, insn);
                        if (err)
@@ -4020,6 +4023,7 @@ process_bpf_exit:
                                        return err;
 
                                insn_idx++;
+                               env->insn_aux_data[insn_idx].seen = true;
                        } else {
                                verbose(env, "invalid BPF_LD mode\n");
                                return -EINVAL;
@@ -4202,6 +4206,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
                                u32 off, u32 cnt)
 {
        struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+       int i;
 
        if (cnt == 1)
                return 0;
@@ -4211,6 +4216,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
        memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
        memcpy(new_data + off + cnt - 1, old_data + off,
               sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+       for (i = off; i < off + cnt - 1; i++)
+               new_data[i].seen = true;
        env->insn_aux_data = new_data;
        vfree(old_data);
        return 0;
@@ -4229,6 +4236,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
        return new_prog;
 }
 
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+       struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+       struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+       struct bpf_insn *insn = env->prog->insnsi;
+       const int insn_cnt = env->prog->len;
+       int i;
+
+       for (i = 0; i < insn_cnt; i++) {
+               if (aux_data[i].seen)
+                       continue;
+               memcpy(insn + i, &nop, sizeof(nop));
+       }
+}
+
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -4555,6 +4581,9 @@ skip_full_check:
        while (!pop_stack(env, NULL, NULL));
        free_states(env);
 
+       if (ret == 0)
+               sanitize_dead_code(env);
+
        if (ret == 0)
                /* program is valid, convert *(u32*)(ctx + off) accesses */
                ret = convert_ctx_accesses(env);
index 6db80fc0810b9270b0cec3d7cfa2cf9b5a3413f1..b3663896278ed71f854963024caa6549b6a57935 100644 (file)
@@ -108,7 +108,8 @@ static int __init parse_crashkernel_mem(char *cmdline,
                                return -EINVAL;
                        }
                }
-       }
+       } else
+               pr_info("crashkernel size resulted in zero bytes\n");
 
        return 0;
 }
index 3939a4674e0ae48395f290edc20d61f4c307c89b..16beab4767e1e686e8ccd3642a82cbc4adde7f59 100644 (file)
@@ -6676,6 +6676,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
                ns_inode = ns_path.dentry->d_inode;
                ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
                ns_link_info->ino = ns_inode->i_ino;
+               path_put(&ns_path);
        }
 }
 
@@ -7874,15 +7875,16 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                }
        }
        perf_tp_event(call->event.type, count, raw_data, size, regs, head,
-                     rctx, task, NULL);
+                     rctx, task);
 }
 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
 
 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                   struct pt_regs *regs, struct hlist_head *head, int rctx,
-                  struct task_struct *task, struct perf_event *event)
+                  struct task_struct *task)
 {
        struct perf_sample_data data;
+       struct perf_event *event;
 
        struct perf_raw_record raw = {
                .frag = {
@@ -7896,15 +7898,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
 
        perf_trace_buf_update(record, event_type);
 
-       /* Use the given event instead of the hlist */
-       if (event) {
+       hlist_for_each_entry_rcu(event, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
                        perf_swevent_event(event, count, &data, regs);
-       } else {
-               hlist_for_each_entry_rcu(event, head, hlist_entry) {
-                       if (perf_tp_event_match(event, &data, regs))
-                               perf_swevent_event(event, count, &data, regs);
-               }
        }
 
        /*
index 4e55eedba8d689cfc949ea92caa4111cdca22686..432eadf6b58c18d9de6a3d09f3fef36089b4b5a2 100644 (file)
@@ -1871,7 +1871,7 @@ static __latent_entropy struct task_struct *copy_process(
                retval = -ERESTARTNOINTR;
                goto bad_fork_cancel_cgroup;
        }
-       if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+       if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
                retval = -ENOMEM;
                goto bad_fork_cancel_cgroup;
        }
index 2ff1c0c82fc91a91e7a59edc1de72aab85d131c7..0f922729bab9b202d1d79054dee2ff56d8a523a2 100644 (file)
@@ -1246,7 +1246,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 * set the trigger type must match. Also all must
                 * agree on ONESHOT.
                 */
-               unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
+               unsigned int oldtype;
+
+               /*
+                * If nobody did set the configuration before, inherit
+                * the one provided by the requester.
+                */
+               if (irqd_trigger_type_was_set(&desc->irq_data)) {
+                       oldtype = irqd_get_trigger_type(&desc->irq_data);
+               } else {
+                       oldtype = new->flags & IRQF_TRIGGER_MASK;
+                       irqd_set_trigger_type(&desc->irq_data, oldtype);
+               }
 
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
index a3cbbc8191c52da53a1b522db248aa79528bb13f..7df2480005f863693f20d5450515d85085c225b6 100644 (file)
@@ -384,7 +384,7 @@ unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
 {
        struct cpumap *cm = this_cpu_ptr(m->maps);
 
-       return m->global_available - cpudown ? cm->available : 0;
+       return (m->global_available - cpudown) ? cm->available : 0;
 }
 
 /**
index 1215229d1c1281b2363092b86d9246336ee636e0..ef2a47e0eab6d3030af076cf62a0606ff4c6149f 100644 (file)
@@ -20,7 +20,7 @@
 static int irqfixup __read_mostly;
 
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
-static void poll_spurious_irqs(unsigned long dummy);
+static void poll_spurious_irqs(struct timer_list *unused);
 static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
 static int irq_poll_cpu;
 static atomic_t irq_poll_active;
@@ -143,7 +143,7 @@ out:
        return ok;
 }
 
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_spurious_irqs(struct timer_list *unused)
 {
        struct irq_desc *desc;
        int i;
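
Note: this is part of the timer API conversion in which callbacks receive the struct timer_list pointer instead of an unsigned long cookie. A minimal sketch of the new convention for a hypothetical periodic poller:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void my_poll(struct timer_list *unused);
static DEFINE_TIMER(my_poll_timer, my_poll);

static void my_poll(struct timer_list *unused)
{
	/* do the periodic work, then re-arm */
	mod_timer(&my_poll_timer, jiffies + HZ / 10);
}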
index 8ff4ca4665ff830014db9b9698726ac0b732da58..8594d24e4adc2245e0f7cd0f6f48d9eef755d551 100644 (file)
@@ -769,7 +769,7 @@ static __init int jump_label_test(void)
 
        return 0;
 }
-late_initcall(jump_label_test);
+early_initcall(jump_label_test);
 #endif /* STATIC_KEYS_SELFTEST */
 
 #endif /* HAVE_JUMP_LABEL */
index 1e6ae66c6244329fa21f6190b1ab9d088493bfd5..d5fa4116688aff6f9d5c3c58787b4e848be4dc0a 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <linux/filter.h>
+#include <linux/ftrace.h>
 #include <linux/compiler.h>
 
 #include <asm/sections.h>
@@ -337,6 +338,10 @@ const char *kallsyms_lookup(unsigned long addr,
        if (!ret)
                ret = bpf_address_lookup(addr, symbolsize,
                                         offset, modname, namebuf);
+
+       if (!ret)
+               ret = ftrace_mod_address_lookup(addr, symbolsize,
+                                               offset, modname, namebuf);
        return ret;
 }
 
@@ -474,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol);
 struct kallsym_iter {
        loff_t pos;
        loff_t pos_mod_end;
+       loff_t pos_ftrace_mod_end;
        unsigned long value;
        unsigned int nameoff; /* If iterating in core kernel symbols. */
        char type;
@@ -497,11 +503,25 @@ static int get_ksymbol_mod(struct kallsym_iter *iter)
        return 1;
 }
 
+static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
+{
+       int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
+                                        &iter->value, &iter->type,
+                                        iter->name, iter->module_name,
+                                        &iter->exported);
+       if (ret < 0) {
+               iter->pos_ftrace_mod_end = iter->pos;
+               return 0;
+       }
+
+       return 1;
+}
+
 static int get_ksymbol_bpf(struct kallsym_iter *iter)
 {
        iter->module_name[0] = '\0';
        iter->exported = 0;
-       return bpf_get_kallsym(iter->pos - iter->pos_mod_end,
+       return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
                               &iter->value, &iter->type,
                               iter->name) < 0 ? 0 : 1;
 }
@@ -526,20 +546,31 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
        iter->name[0] = '\0';
        iter->nameoff = get_symbol_offset(new_pos);
        iter->pos = new_pos;
-       if (new_pos == 0)
+       if (new_pos == 0) {
                iter->pos_mod_end = 0;
+               iter->pos_ftrace_mod_end = 0;
+       }
 }
 
 static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
 {
        iter->pos = pos;
 
-       if (iter->pos_mod_end > 0 &&
-           iter->pos_mod_end < iter->pos)
+       if (iter->pos_ftrace_mod_end > 0 &&
+           iter->pos_ftrace_mod_end < iter->pos)
                return get_ksymbol_bpf(iter);
 
-       if (!get_ksymbol_mod(iter))
-               return get_ksymbol_bpf(iter);
+       if (iter->pos_mod_end > 0 &&
+           iter->pos_mod_end < iter->pos) {
+               if (!get_ksymbol_ftrace_mod(iter))
+                       return get_ksymbol_bpf(iter);
+               return 1;
+       }
+
+       if (!get_ksymbol_mod(iter)) {
+               if (!get_ksymbol_ftrace_mod(iter))
+                       return get_ksymbol_bpf(iter);
+       }
 
        return 1;
 }
@@ -583,14 +614,14 @@ static void s_stop(struct seq_file *m, void *p)
 
 static int s_show(struct seq_file *m, void *p)
 {
-       unsigned long value;
+       void *value;
        struct kallsym_iter *iter = m->private;
 
        /* Some debugging symbols have no name.  Ignore them. */
        if (!iter->name[0])
                return 0;
 
-       value = iter->show_value ? iter->value : 0;
+       value = iter->show_value ? (void *)iter->value : NULL;
 
        if (iter->module_name[0]) {
                char type;
@@ -601,10 +632,10 @@ static int s_show(struct seq_file *m, void *p)
                 */
                type = iter->exported ? toupper(iter->type) :
                                        tolower(iter->type);
-               seq_printf(m, KALLSYM_FMT " %c %s\t[%s]\n", value,
+               seq_printf(m, "%px %c %s\t[%s]\n", value,
                           type, iter->name, iter->module_name);
        } else
-               seq_printf(m, KALLSYM_FMT " %c %s\n", value,
+               seq_printf(m, "%px %c %s\n", value,
                           iter->type, iter->name);
        return 0;
 }
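
Note: the move from KALLSYM_FMT to %px is deliberate; as of this cycle plain %p output is hashed, while %px prints the raw pointer and is reserved for places such as kallsyms where exposing the address to a privileged reader is the point. A hedged one-line illustration with a hypothetical message:

#include <linux/printk.h>

static void show_addr(const void *ptr)
{
	/* %p is hashed; %px prints the raw value, so use it with care */
	pr_info("raw %px vs hashed %p\n", ptr, ptr);
}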
index fc6af9e1308b7a943d8519732a5f83885a0db2b5..15f33faf4013bdfea16baf8b0b31053456606620 100644 (file)
 #include <linux/kcov.h>
 #include <asm/setup.h>
 
+/* Number of 64-bit words written per one comparison: */
+#define KCOV_WORDS_PER_CMP 4
+
 /*
  * kcov descriptor (one per opened debugfs file).
  * State transitions of the descriptor:
  *  - initial state after open()
  *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
  *  - then, mmap() call (several calls are allowed but not useful)
- *  - then, repeated enable/disable for a task (only one task a time allowed)
+ *  - then, ioctl(KCOV_ENABLE, arg), where arg is
+ *     KCOV_TRACE_PC - to trace only the PCs
+ *     or
+ *     KCOV_TRACE_CMP - to trace only the comparison operands
+ *  - then, ioctl(KCOV_DISABLE) to disable the task.
+ * Enabling/disabling ioctls can be repeated (only one task a time allowed).
  */
 struct kcov {
        /*
@@ -48,51 +56,176 @@ struct kcov {
        struct task_struct      *t;
 };
 
-/*
- * Entry point from instrumented code.
- * This is called once per basic-block/edge.
- */
-void notrace __sanitizer_cov_trace_pc(void)
+static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
-       struct task_struct *t;
        enum kcov_mode mode;
 
-       t = current;
        /*
         * We are interested in code coverage as a function of a syscall inputs,
         * so we ignore code executed in interrupts.
         */
-       if (!t || !in_task())
-               return;
+       if (!in_task())
+               return false;
        mode = READ_ONCE(t->kcov_mode);
-       if (mode == KCOV_MODE_TRACE) {
-               unsigned long *area;
-               unsigned long pos;
-               unsigned long ip = _RET_IP_;
+       /*
+        * There is some code that runs in interrupts but for which
+        * in_interrupt() returns false (e.g. preempt_schedule_irq()).
+        * READ_ONCE()/barrier() effectively provides load-acquire wrt
+        * interrupts, there are paired barrier()/WRITE_ONCE() in
+        * kcov_ioctl_locked().
+        */
+       barrier();
+       return mode == needed_mode;
+}
 
+static unsigned long canonicalize_ip(unsigned long ip)
+{
 #ifdef CONFIG_RANDOMIZE_BASE
-               ip -= kaslr_offset();
+       ip -= kaslr_offset();
 #endif
+       return ip;
+}
 
-               /*
-                * There is some code that runs in interrupts but for which
-                * in_interrupt() returns false (e.g. preempt_schedule_irq()).
-                * READ_ONCE()/barrier() effectively provides load-acquire wrt
-                * interrupts, there are paired barrier()/WRITE_ONCE() in
-                * kcov_ioctl_locked().
-                */
-               barrier();
-               area = t->kcov_area;
-               /* The first word is number of subsequent PCs. */
-               pos = READ_ONCE(area[0]) + 1;
-               if (likely(pos < t->kcov_size)) {
-                       area[pos] = ip;
-                       WRITE_ONCE(area[0], pos);
-               }
+/*
+ * Entry point from instrumented code.
+ * This is called once per basic-block/edge.
+ */
+void notrace __sanitizer_cov_trace_pc(void)
+{
+       struct task_struct *t;
+       unsigned long *area;
+       unsigned long ip = canonicalize_ip(_RET_IP_);
+       unsigned long pos;
+
+       t = current;
+       if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
+               return;
+
+       area = t->kcov_area;
+       /* The first 64-bit word is the number of subsequent PCs. */
+       pos = READ_ONCE(area[0]) + 1;
+       if (likely(pos < t->kcov_size)) {
+               area[pos] = ip;
+               WRITE_ONCE(area[0], pos);
        }
 }
 EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
 
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
+{
+       struct task_struct *t;
+       u64 *area;
+       u64 count, start_index, end_pos, max_pos;
+
+       t = current;
+       if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
+               return;
+
+       ip = canonicalize_ip(ip);
+
+       /*
+        * We write all comparison arguments and types as u64.
+        * The buffer was allocated for t->kcov_size unsigned longs.
+        */
+       area = (u64 *)t->kcov_area;
+       max_pos = t->kcov_size * sizeof(unsigned long);
+
+       count = READ_ONCE(area[0]);
+
+       /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
+       start_index = 1 + count * KCOV_WORDS_PER_CMP;
+       end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
+       if (likely(end_pos <= max_pos)) {
+               area[start_index] = type;
+               area[start_index + 1] = arg1;
+               area[start_index + 2] = arg2;
+               area[start_index + 3] = ip;
+               WRITE_ONCE(area[0], count + 1);
+       }
+}
+
+void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
+
+void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
+
+void notrace __sanitizer_cov_trace_cmp4(u16 arg1, u16 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
+
+void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
+
+void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
+                       _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
+
+void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
+                       _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
+
+void notrace __sanitizer_cov_trace_const_cmp4(u16 arg1, u16 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
+                       _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
+
+void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
+{
+       write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
+                       _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
+
+void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
+{
+       u64 i;
+       u64 count = cases[0];
+       u64 size = cases[1];
+       u64 type = KCOV_CMP_CONST;
+
+       switch (size) {
+       case 8:
+               type |= KCOV_CMP_SIZE(0);
+               break;
+       case 16:
+               type |= KCOV_CMP_SIZE(1);
+               break;
+       case 32:
+               type |= KCOV_CMP_SIZE(2);
+               break;
+       case 64:
+               type |= KCOV_CMP_SIZE(3);
+               break;
+       default:
+               return;
+       }
+       for (i = 0; i < count; i++)
+               write_comp_data(type, cases[i + 2], val, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
+#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
+
 static void kcov_get(struct kcov *kcov)
 {
        atomic_inc(&kcov->refcount);
@@ -129,6 +262,7 @@ void kcov_task_exit(struct task_struct *t)
        /* Just to not leave dangling references behind. */
        kcov_task_init(t);
        kcov->t = NULL;
+       kcov->mode = KCOV_MODE_INIT;
        spin_unlock(&kcov->lock);
        kcov_put(kcov);
 }
@@ -147,7 +281,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
 
        spin_lock(&kcov->lock);
        size = kcov->size * sizeof(unsigned long);
-       if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
+       if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
            vma->vm_end - vma->vm_start != size) {
                res = -EINVAL;
                goto exit;
@@ -176,6 +310,7 @@ static int kcov_open(struct inode *inode, struct file *filep)
        kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
        if (!kcov)
                return -ENOMEM;
+       kcov->mode = KCOV_MODE_DISABLED;
        atomic_set(&kcov->refcount, 1);
        spin_lock_init(&kcov->lock);
        filep->private_data = kcov;
@@ -211,7 +346,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
                if (size < 2 || size > INT_MAX / sizeof(unsigned long))
                        return -EINVAL;
                kcov->size = size;
-               kcov->mode = KCOV_MODE_TRACE;
+               kcov->mode = KCOV_MODE_INIT;
                return 0;
        case KCOV_ENABLE:
                /*
@@ -221,17 +356,25 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
                 * at task exit or voluntary by KCOV_DISABLE. After that it can
                 * be enabled for another task.
                 */
-               unused = arg;
-               if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
-                   kcov->area == NULL)
+               if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
                        return -EINVAL;
                if (kcov->t != NULL)
                        return -EBUSY;
+               if (arg == KCOV_TRACE_PC)
+                       kcov->mode = KCOV_MODE_TRACE_PC;
+               else if (arg == KCOV_TRACE_CMP)
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+                       kcov->mode = KCOV_MODE_TRACE_CMP;
+#else
+               return -ENOTSUPP;
+#endif
+               else
+                       return -EINVAL;
                t = current;
                /* Cache in task struct for performance. */
                t->kcov_size = kcov->size;
                t->kcov_area = kcov->area;
-               /* See comment in __sanitizer_cov_trace_pc(). */
+               /* See comment in check_kcov_mode(). */
                barrier();
                WRITE_ONCE(t->kcov_mode, kcov->mode);
                t->kcov = kcov;
@@ -249,6 +392,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
                        return -EINVAL;
                kcov_task_init(t);
                kcov->t = NULL;
+               kcov->mode = KCOV_MODE_INIT;
                kcov_put(kcov);
                return 0;
        default:
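For context, here is a minimal userspace sketch of how the comparison-tracing mode added above can be consumed. It assumes the KCOV_* constants from the uapi <linux/kcov.h> header introduced with this series, hard-codes 4 for KCOV_WORDS_PER_CMP, and is illustrative rather than reference usage:

    #include <fcntl.h>
    #include <linux/kcov.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define COVER_SIZE (64 << 10)   /* in unsigned longs, as for KCOV_INIT_TRACE */

    int main(void)
    {
            int fd = open("/sys/kernel/debug/kcov", O_RDWR);

            if (fd == -1)
                    return 1;
            if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                    return 1;
            uint64_t *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (cover == MAP_FAILED)
                    return 1;
            if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP))
                    return 1;

            __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
            read(-1, NULL, 0);      /* the syscall whose comparisons we trace */
            uint64_t n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);

            for (uint64_t i = 0; i < n; i++) {
                    /* each record is 4 words: type, arg1, arg2, ip (see write_comp_data()) */
                    uint64_t type = cover[1 + i * 4];
                    uint64_t arg1 = cover[1 + i * 4 + 1];
                    uint64_t arg2 = cover[1 + i * 4 + 2];
                    uint64_t ip   = cover[1 + i * 4 + 3];

                    printf("size=%u const=%u: %#llx vs %#llx at %#llx\n",
                           1u << (type >> 1), (unsigned)(type & KCOV_CMP_CONST),
                           (unsigned long long)arg1, (unsigned long long)arg2,
                           (unsigned long long)ip);
            }

            ioctl(fd, KCOV_DISABLE, 0);
            munmap(cover, COVER_SIZE * sizeof(unsigned long));
            close(fd);
            return 0;
    }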
index 8af313081b0d9a7f626f6b3b496119737e9e89a6..cd50e99202b011dfdb847dd2772f14e818d268bb 100644 (file)
@@ -843,7 +843,7 @@ void __kthread_queue_delayed_work(struct kthread_worker *worker,
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;
 
-       WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn);
+       WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
 
        /*
         * If @delay is 0, queue @dwork->work immediately.  This is for
index 222aba4aa960a947488afa52e21c11a3d47d457a..dea01ac9cb74c4ef619c51b5eba4e869d9e4fdc8 100644 (file)
@@ -3481,6 +3481,8 @@ static noinline int do_init_module(struct module *mod)
        if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
                async_synchronize_full();
 
+       ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
+                       mod->init_layout.size);
        mutex_lock(&module_mutex);
        /* Drop initial reference. */
        module_put(mod);
@@ -4155,7 +4157,7 @@ static int m_show(struct seq_file *m, void *p)
 {
        struct module *mod = list_entry(p, struct module, list);
        char buf[MODULE_FLAGS_BUF_SIZE];
-       unsigned long value;
+       void *value;
 
        /* We always ignore unformed modules. */
        if (mod->state == MODULE_STATE_UNFORMED)
@@ -4171,8 +4173,8 @@ static int m_show(struct seq_file *m, void *p)
                   mod->state == MODULE_STATE_COMING ? "Loading" :
                   "Live");
        /* Used by oprofile and other similar tools. */
-       value = m->private ? 0 : (unsigned long)mod->core_layout.base;
-       seq_printf(m, " 0x" KALLSYM_FMT, value);
+       value = m->private ? NULL : mod->core_layout.base;
+       seq_printf(m, " 0x%px", value);
 
        /* Taints info */
        if (mod->taints)
index f262c9a4e70ab76d5ee5e748542845cb99e8eaff..57c0074d50cc485b706579aaa616e4a004775a8e 100644 (file)
@@ -288,9 +288,9 @@ static void invoke_padata_reorder(struct work_struct *work)
        local_bh_enable();
 }
 
-static void padata_reorder_timer(unsigned long arg)
+static void padata_reorder_timer(struct timer_list *t)
 {
-       struct parallel_data *pd = (struct parallel_data *)arg;
+       struct parallel_data *pd = from_timer(pd, t, timer);
        unsigned int weight;
        int target_cpu, cpu;
 
@@ -485,7 +485,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
 
        padata_init_pqueues(pd);
        padata_init_squeues(pd);
-       setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+       timer_setup(&pd->timer, padata_reorder_timer, 0);
        atomic_set(&pd->seq_nr, -1);
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
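The conversion above follows the tree-wide timer API change: callbacks now take the timer_list pointer itself and recover their containing object with from_timer(), so the old ->data cookie is no longer needed. A minimal sketch of the pattern, with a hypothetical struct foo:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct foo {
            struct timer_list timer;
            int pending;
    };

    static void foo_timeout(struct timer_list *t)
    {
            /* from_timer() is container_of() keyed on the timer member */
            struct foo *foo = from_timer(foo, t, timer);

            foo->pending = 0;
    }

    static void foo_arm(struct foo *foo)
    {
            /* replaces setup_timer(&foo->timer, foo_timeout, (unsigned long)foo) */
            timer_setup(&foo->timer, foo_timeout, 0);
            mod_timer(&foo->timer, jiffies + HZ);
    }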
index bdd18afa19a486c759e7cd4b7a2983cfa0097fcb..2cfef408fec931ac0ef3f3c95b31269ed1d3ea29 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/console.h>
 #include <linux/bug.h>
 #include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <asm/sections.h>
 
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
@@ -322,6 +324,7 @@ const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
        { 'E', ' ', true },     /* TAINT_UNSIGNED_MODULE */
        { 'L', ' ', false },    /* TAINT_SOFTLOCKUP */
        { 'K', ' ', true },     /* TAINT_LIVEPATCH */
+       { 'X', ' ', true },     /* TAINT_AUX */
 };
 
 /**
@@ -343,6 +346,7 @@ const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
  *  'E' - Unsigned module has been loaded.
  *  'L' - A soft lockup has previously occurred.
  *  'K' - Kernel has been live patched.
+ *  'X' - Auxiliary taint, for distros' use.
  *
  *     The string is overwritten by the next call to print_tainted().
  */
@@ -518,7 +522,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 {
        disable_trace_on_warning();
 
-       pr_warn("------------[ cut here ]------------\n");
+       if (args)
+               pr_warn(CUT_HERE);
 
        if (file)
                pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
@@ -582,9 +587,49 @@ EXPORT_SYMBOL(warn_slowpath_fmt_taint);
 
 void warn_slowpath_null(const char *file, int line)
 {
+       pr_warn(CUT_HERE);
        __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
 }
 EXPORT_SYMBOL(warn_slowpath_null);
+#else
+void __warn_printk(const char *fmt, ...)
+{
+       va_list args;
+
+       pr_warn(CUT_HERE);
+
+       va_start(args, fmt);
+       vprintk(fmt, args);
+       va_end(args);
+}
+EXPORT_SYMBOL(__warn_printk);
+#endif
+
+#ifdef CONFIG_BUG
+
+/* Support resetting WARN*_ONCE state */
+
+static int clear_warn_once_set(void *data, u64 val)
+{
+       generic_bug_clear_once();
+       memset(__start_once, 0, __end_once - __start_once);
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clear_warn_once_fops,
+                       NULL,
+                       clear_warn_once_set,
+                       "%lld\n");
+
+static __init int register_warn_debugfs(void)
+{
+       /* Don't care about failure */
+       debugfs_create_file("clear_warn_once", 0200, NULL,
+                           NULL, &clear_warn_once_fops);
+       return 0;
+}
+
+device_initcall(register_warn_debugfs);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
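The clear_warn_once debugfs file registered above re-arms the one-shot state of WARN*_ONCE sites when written to. A small userspace sketch, assuming debugfs is mounted at the usual /sys/kernel/debug:

    #include <fcntl.h>
    #include <unistd.h>

    /* equivalent to: echo 1 > /sys/kernel/debug/clear_warn_once (as root) */
    int clear_warn_once(void)
    {
            int fd = open("/sys/kernel/debug/clear_warn_once", O_WRONLY);
            ssize_t ret;

            if (fd < 0)
                    return -1;
            ret = write(fd, "1\n", 2);
            close(fd);
            return ret == 2 ? 0 : -1;
    }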
index 020dedbdf066bccbc370cba20be8dc7dbc914629..b13b624e2c4902c67d2ae789c7af8fb2d6269cd8 100644 (file)
 #include <linux/proc_ns.h>
 #include <linux/proc_fs.h>
 #include <linux/sched/task.h>
+#include <linux/idr.h>
 
-#define pid_hashfn(nr, ns)     \
-       hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
-static struct hlist_head *pid_hash;
-static unsigned int pidhash_shift = 4;
 struct pid init_struct_pid = INIT_STRUCT_PID;
 
 int pid_max = PID_MAX_DEFAULT;
@@ -53,15 +50,6 @@ int pid_max = PID_MAX_DEFAULT;
 int pid_max_min = RESERVED_PIDS + 1;
 int pid_max_max = PID_MAX_LIMIT;
 
-static inline int mk_pid(struct pid_namespace *pid_ns,
-               struct pidmap *map, int off)
-{
-       return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
-}
-
-#define find_next_offset(map, off)                                     \
-               find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
-
 /*
  * PID-map pages start out as NULL, they get allocated upon
  * first use and are never deallocated. This way a low pid_max
@@ -70,11 +58,8 @@ static inline int mk_pid(struct pid_namespace *pid_ns,
  */
 struct pid_namespace init_pid_ns = {
        .kref = KREF_INIT(2),
-       .pidmap = {
-               [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
-       },
-       .last_pid = 0,
-       .nr_hashed = PIDNS_HASH_ADDING,
+       .idr = IDR_INIT,
+       .pid_allocated = PIDNS_ADDING,
        .level = 0,
        .child_reaper = &init_task,
        .user_ns = &init_user_ns,
@@ -101,138 +86,6 @@ EXPORT_SYMBOL_GPL(init_pid_ns);
 
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-static void free_pidmap(struct upid *upid)
-{
-       int nr = upid->nr;
-       struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
-       int offset = nr & BITS_PER_PAGE_MASK;
-
-       clear_bit(offset, map->page);
-       atomic_inc(&map->nr_free);
-}
-
-/*
- * If we started walking pids at 'base', is 'a' seen before 'b'?
- */
-static int pid_before(int base, int a, int b)
-{
-       /*
-        * This is the same as saying
-        *
-        * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
-        * and that mapping orders 'a' and 'b' with respect to 'base'.
-        */
-       return (unsigned)(a - base) < (unsigned)(b - base);
-}
-
-/*
- * We might be racing with someone else trying to set pid_ns->last_pid
- * at the pid allocation time (there's also a sysctl for this, but racing
- * with this one is OK, see comment in kernel/pid_namespace.c about it).
- * We want the winner to have the "later" value, because if the
- * "earlier" value prevails, then a pid may get reused immediately.
- *
- * Since pids rollover, it is not sufficient to just pick the bigger
- * value.  We have to consider where we started counting from.
- *
- * 'base' is the value of pid_ns->last_pid that we observed when
- * we started looking for a pid.
- *
- * 'pid' is the pid that we eventually found.
- */
-static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
-{
-       int prev;
-       int last_write = base;
-       do {
-               prev = last_write;
-               last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
-       } while ((prev != last_write) && (pid_before(base, last_write, pid)));
-}
-
-static int alloc_pidmap(struct pid_namespace *pid_ns)
-{
-       int i, offset, max_scan, pid, last = pid_ns->last_pid;
-       struct pidmap *map;
-
-       pid = last + 1;
-       if (pid >= pid_max)
-               pid = RESERVED_PIDS;
-       offset = pid & BITS_PER_PAGE_MASK;
-       map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
-       /*
-        * If last_pid points into the middle of the map->page we
-        * want to scan this bitmap block twice, the second time
-        * we start with offset == 0 (or RESERVED_PIDS).
-        */
-       max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
-       for (i = 0; i <= max_scan; ++i) {
-               if (unlikely(!map->page)) {
-                       void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
-                       /*
-                        * Free the page if someone raced with us
-                        * installing it:
-                        */
-                       spin_lock_irq(&pidmap_lock);
-                       if (!map->page) {
-                               map->page = page;
-                               page = NULL;
-                       }
-                       spin_unlock_irq(&pidmap_lock);
-                       kfree(page);
-                       if (unlikely(!map->page))
-                               return -ENOMEM;
-               }
-               if (likely(atomic_read(&map->nr_free))) {
-                       for ( ; ; ) {
-                               if (!test_and_set_bit(offset, map->page)) {
-                                       atomic_dec(&map->nr_free);
-                                       set_last_pid(pid_ns, last, pid);
-                                       return pid;
-                               }
-                               offset = find_next_offset(map, offset);
-                               if (offset >= BITS_PER_PAGE)
-                                       break;
-                               pid = mk_pid(pid_ns, map, offset);
-                               if (pid >= pid_max)
-                                       break;
-                       }
-               }
-               if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
-                       ++map;
-                       offset = 0;
-               } else {
-                       map = &pid_ns->pidmap[0];
-                       offset = RESERVED_PIDS;
-                       if (unlikely(last == offset))
-                               break;
-               }
-               pid = mk_pid(pid_ns, map, offset);
-       }
-       return -EAGAIN;
-}
-
-int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
-{
-       int offset;
-       struct pidmap *map, *end;
-
-       if (last >= PID_MAX_LIMIT)
-               return -1;
-
-       offset = (last + 1) & BITS_PER_PAGE_MASK;
-       map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
-       end = &pid_ns->pidmap[PIDMAP_ENTRIES];
-       for (; map < end; map++, offset = 0) {
-               if (unlikely(!map->page))
-                       continue;
-               offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
-               if (offset < BITS_PER_PAGE)
-                       return mk_pid(pid_ns, map, offset);
-       }
-       return -1;
-}
-
 void put_pid(struct pid *pid)
 {
        struct pid_namespace *ns;
@@ -265,8 +118,7 @@ void free_pid(struct pid *pid)
        for (i = 0; i <= pid->level; i++) {
                struct upid *upid = pid->numbers + i;
                struct pid_namespace *ns = upid->ns;
-               hlist_del_rcu(&upid->pid_chain);
-               switch(--ns->nr_hashed) {
+               switch (--ns->pid_allocated) {
                case 2:
                case 1:
                        /* When all that is left in the pid namespace
@@ -275,21 +127,20 @@ void free_pid(struct pid *pid)
                         */
                        wake_up_process(ns->child_reaper);
                        break;
-               case PIDNS_HASH_ADDING:
+               case PIDNS_ADDING:
                        /* Handle a fork failure of the first process */
                        WARN_ON(ns->child_reaper);
-                       ns->nr_hashed = 0;
+                       ns->pid_allocated = 0;
                        /* fall through */
                case 0:
                        schedule_work(&ns->proc_work);
                        break;
                }
+
+               idr_remove(&ns->idr, upid->nr);
        }
        spin_unlock_irqrestore(&pidmap_lock, flags);
 
-       for (i = 0; i <= pid->level; i++)
-               free_pidmap(pid->numbers + i);
-
        call_rcu(&pid->rcu, delayed_put_pid);
 }
 
@@ -308,8 +159,29 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 
        tmp = ns;
        pid->level = ns->level;
+
        for (i = ns->level; i >= 0; i--) {
-               nr = alloc_pidmap(tmp);
+               int pid_min = 1;
+
+               idr_preload(GFP_KERNEL);
+               spin_lock_irq(&pidmap_lock);
+
+               /*
+                * init really needs pid 1, but after reaching the maximum
+                * wrap back to RESERVED_PIDS
+                */
+               if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
+                       pid_min = RESERVED_PIDS;
+
+               /*
+                * Store a null pointer so find_pid_ns does not find
+                * a partially initialized PID (see below).
+                */
+               nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
+                                     pid_max, GFP_ATOMIC);
+               spin_unlock_irq(&pidmap_lock);
+               idr_preload_end();
+
                if (nr < 0) {
                        retval = nr;
                        goto out_free;
@@ -334,12 +206,12 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 
        upid = pid->numbers + ns->level;
        spin_lock_irq(&pidmap_lock);
-       if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
+       if (!(ns->pid_allocated & PIDNS_ADDING))
                goto out_unlock;
        for ( ; upid >= pid->numbers; --upid) {
-               hlist_add_head_rcu(&upid->pid_chain,
-                               &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
-               upid->ns->nr_hashed++;
+               /* Make the PID visible to find_pid_ns. */
+               idr_replace(&upid->ns->idr, pid, upid->nr);
+               upid->ns->pid_allocated++;
        }
        spin_unlock_irq(&pidmap_lock);
 
@@ -350,8 +222,11 @@ out_unlock:
        put_pid_ns(ns);
 
 out_free:
+       spin_lock_irq(&pidmap_lock);
        while (++i <= ns->level)
-               free_pidmap(pid->numbers + i);
+               idr_remove(&ns->idr, (pid->numbers + i)->nr);
+
+       spin_unlock_irq(&pidmap_lock);
 
        kmem_cache_free(ns->pid_cachep, pid);
        return ERR_PTR(retval);
@@ -360,21 +235,13 @@ out_free:
 void disable_pid_allocation(struct pid_namespace *ns)
 {
        spin_lock_irq(&pidmap_lock);
-       ns->nr_hashed &= ~PIDNS_HASH_ADDING;
+       ns->pid_allocated &= ~PIDNS_ADDING;
        spin_unlock_irq(&pidmap_lock);
 }
 
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
-       struct upid *pnr;
-
-       hlist_for_each_entry_rcu(pnr,
-                       &pid_hash[pid_hashfn(nr, ns)], pid_chain)
-               if (pnr->nr == nr && pnr->ns == ns)
-                       return container_of(pnr, struct pid,
-                                       numbers[ns->level]);
-
-       return NULL;
+       return idr_find(&ns->idr, nr);
 }
 EXPORT_SYMBOL_GPL(find_pid_ns);
 
@@ -530,6 +397,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
                if (type != PIDTYPE_PID) {
                        if (type == __PIDTYPE_TGID)
                                type = PIDTYPE_PID;
+
                        task = task->group_leader;
                }
                nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
@@ -553,35 +421,13 @@ EXPORT_SYMBOL_GPL(task_active_pid_ns);
  */
 struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
 {
-       struct pid *pid;
-
-       do {
-               pid = find_pid_ns(nr, ns);
-               if (pid)
-                       break;
-               nr = next_pidmap(ns, nr);
-       } while (nr > 0);
-
-       return pid;
-}
-
-/*
- * The pid hash table is scaled according to the amount of memory in the
- * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
- * more.
- */
-void __init pidhash_init(void)
-{
-       pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
-                                          HASH_EARLY | HASH_SMALL | HASH_ZERO,
-                                          &pidhash_shift, NULL,
-                                          0, 4096);
+       return idr_get_next(&ns->idr, &nr);
 }
 
-void __init pidmap_init(void)
+void __init pid_idr_init(void)
 {
        /* Verify no one has done anything silly: */
-       BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
+       BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);
 
        /* bump default and minimum pid_max based on number of cpus */
        pid_max = min(pid_max_max, max_t(int, pid_max,
@@ -590,10 +436,7 @@ void __init pidmap_init(void)
                                PIDS_PER_CPU_MIN * num_possible_cpus());
        pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
 
-       init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       /* Reserve PID 0. We never call free_pidmap(0) */
-       set_bit(0, init_pid_ns.pidmap[0].page);
-       atomic_dec(&init_pid_ns.pidmap[0].nr_free);
+       idr_init(&init_pid_ns.idr);
 
        init_pid_ns.pid_cachep = KMEM_CACHE(pid,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
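The rewritten alloc_pid() above uses an allocate-then-publish idiom: the ID is reserved with a NULL pointer under pidmap_lock so find_pid_ns() cannot see a half-built pid, and it only becomes visible once idr_replace() installs the real pointer. A condensed sketch of the same idiom; struct obj and obj_create() are illustrative names, not kernel code:

    #include <linux/idr.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(obj_idr);
    static DEFINE_SPINLOCK(obj_lock);

    struct obj {
            int id;
            /* ... */
    };

    static struct obj *obj_create(void)
    {
            struct obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
            int id;

            if (!obj)
                    return NULL;

            idr_preload(GFP_KERNEL);
            spin_lock_irq(&obj_lock);
            /* reserve the slot; lookups via idr_find() still return NULL */
            id = idr_alloc_cyclic(&obj_idr, NULL, 1, 0 /* no upper limit */, GFP_ATOMIC);
            spin_unlock_irq(&obj_lock);
            idr_preload_end();

            if (id < 0) {
                    kfree(obj);
                    return NULL;
            }

            obj->id = id;
            /* ... finish initialising the object ... */

            spin_lock_irq(&obj_lock);
            idr_replace(&obj_idr, obj, id);         /* publish: lookups now succeed */
            spin_unlock_irq(&obj_lock);
            return obj;
    }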
index 4918314893bc6620ae95660c78588d6d5ac9129c..0b53eef7d34b1a5d60d97c650fbc2354238b9f27 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/export.h>
 #include <linux/sched/task.h>
 #include <linux/sched/signal.h>
+#include <linux/idr.h>
 
 struct pid_cache {
        int nr_ids;
@@ -98,7 +99,6 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
        struct pid_namespace *ns;
        unsigned int level = parent_pid_ns->level + 1;
        struct ucounts *ucounts;
-       int i;
        int err;
 
        err = -EINVAL;
@@ -117,17 +117,15 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
        if (ns == NULL)
                goto out_dec;
 
-       ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!ns->pidmap[0].page)
-               goto out_free;
+       idr_init(&ns->idr);
 
        ns->pid_cachep = create_pid_cachep(level + 1);
        if (ns->pid_cachep == NULL)
-               goto out_free_map;
+               goto out_free_idr;
 
        err = ns_alloc_inum(&ns->ns);
        if (err)
-               goto out_free_map;
+               goto out_free_idr;
        ns->ns.ops = &pidns_operations;
 
        kref_init(&ns->kref);
@@ -135,20 +133,13 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
        ns->parent = get_pid_ns(parent_pid_ns);
        ns->user_ns = get_user_ns(user_ns);
        ns->ucounts = ucounts;
-       ns->nr_hashed = PIDNS_HASH_ADDING;
+       ns->pid_allocated = PIDNS_ADDING;
        INIT_WORK(&ns->proc_work, proc_cleanup_work);
 
-       set_bit(0, ns->pidmap[0].page);
-       atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
-
-       for (i = 1; i < PIDMAP_ENTRIES; i++)
-               atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
-
        return ns;
 
-out_free_map:
-       kfree(ns->pidmap[0].page);
-out_free:
+out_free_idr:
+       idr_destroy(&ns->idr);
        kmem_cache_free(pid_ns_cachep, ns);
 out_dec:
        dec_pid_namespaces(ucounts);
@@ -168,11 +159,9 @@ static void delayed_free_pidns(struct rcu_head *p)
 
 static void destroy_pid_namespace(struct pid_namespace *ns)
 {
-       int i;
-
        ns_free_inum(&ns->ns);
-       for (i = 0; i < PIDMAP_ENTRIES; i++)
-               kfree(ns->pidmap[i].page);
+
+       idr_destroy(&ns->idr);
        call_rcu(&ns->rcu, delayed_free_pidns);
 }
 
@@ -213,6 +202,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        int rc;
        struct task_struct *task, *me = current;
        int init_pids = thread_group_leader(me) ? 1 : 2;
+       struct pid *pid;
 
        /* Don't allow any more processes into the pid namespace */
        disable_pid_allocation(pid_ns);
@@ -239,20 +229,16 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
         *        maintain a tasklist for each pid namespace.
         *
         */
+       rcu_read_lock();
        read_lock(&tasklist_lock);
-       nr = next_pidmap(pid_ns, 1);
-       while (nr > 0) {
-               rcu_read_lock();
-
-               task = pid_task(find_vpid(nr), PIDTYPE_PID);
+       nr = 2;
+       idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
+               task = pid_task(pid, PIDTYPE_PID);
                if (task && !__fatal_signal_pending(task))
                        send_sig_info(SIGKILL, SEND_SIG_FORCED, task);
-
-               rcu_read_unlock();
-
-               nr = next_pidmap(pid_ns, nr);
        }
        read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        /*
         * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
@@ -268,7 +254,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
         * sys_wait4() above can't reap the EXIT_DEAD children but we do not
         * really care, we could reparent them to the global init. We could
         * exit and reap ->child_reaper even if it is not the last thread in
-        * this pid_ns, free_pid(nr_hashed == 0) calls proc_cleanup_work(),
+        * this pid_ns, free_pid(pid_allocated == 0) calls proc_cleanup_work(),
         * pid_ns can not go away until proc_kill_sb() drops the reference.
         *
         * But this ns can also have other tasks injected by setns()+fork().
@@ -282,7 +268,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
         */
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
-               if (pid_ns->nr_hashed == init_pids)
+               if (pid_ns->pid_allocated == init_pids)
                        break;
                schedule();
        }
@@ -301,6 +287,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(current);
        struct ctl_table tmp = *table;
+       int ret, next;
 
        if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;
@@ -311,8 +298,14 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
         * it should synchronize its usage with external means.
         */
 
-       tmp.data = &pid_ns->last_pid;
-       return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+       next = idr_get_cursor(&pid_ns->idr) - 1;
+
+       tmp.data = &next;
+       ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+       if (!ret && write)
+               idr_set_cursor(&pid_ns->idr, next + 1);
+
+       return ret;
 }
 
 extern int pid_max;
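pid_ns_ctl_handler() above now backs the ns_last_pid sysctl (registered later in this file) with the IDR cursor instead of last_pid, so userspace behaviour is unchanged. A sketch of the CRIU-style use, which needs CAP_SYS_ADMIN in the pid namespace's owning user namespace:

    #include <stdio.h>

    /* ask the kernel to hand out last_pid + 1 to the next fork() in this pid ns */
    int set_ns_last_pid(int last_pid)
    {
            FILE *f = fopen("/proc/sys/kernel/ns_last_pid", "w");
            int ok;

            if (!f)
                    return -1;
            ok = fprintf(f, "%d", last_pid) > 0;
            return fclose(f) == 0 && ok ? 0 : -1;
    }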
index 512f7c2baedd59bef5300e361c22e92b7a4845a0..5d81206a572d721e7d96b129f160a4e16d2e2f2e 100644 (file)
@@ -2190,7 +2190,7 @@ again:
                }
 
                if (console_seq < log_first_seq) {
-                       len = sprintf(text, "** %u printk messages dropped ** ",
+                       len = sprintf(text, "** %u printk messages dropped **\n",
                                      (unsigned)(log_first_seq - console_seq));
 
                        /* messages are gone, move to first one */
index 3cdaeaef9ce1a63bfde3bc631946e40ed731e035..3e3c2004bb232661bae666e00e4645fad438aee6 100644 (file)
@@ -39,7 +39,7 @@
  * There are situations when we want to make sure that all buffers
  * were handled or when IRQs are blocked.
  */
-static int printk_safe_irq_ready;
+static int printk_safe_irq_ready __read_mostly;
 
 #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -    \
                                sizeof(atomic_t) -                      \
@@ -63,11 +63,8 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
 /* Get flushed in a more safe context. */
 static void queue_flush_work(struct printk_safe_seq_buf *s)
 {
-       if (printk_safe_irq_ready) {
-               /* Make sure that IRQ work is really initialized. */
-               smp_rmb();
+       if (printk_safe_irq_ready)
                irq_work_queue(&s->work);
-       }
 }
 
 /*
@@ -75,7 +72,7 @@ static void queue_flush_work(struct printk_safe_seq_buf *s)
  * have dedicated buffers, because otherwise printk-safe preempted by
  * NMI-printk would have overwritten the NMI messages.
  *
- * The messages are fushed from irq work (or from panic()), possibly,
+ * The messages are flushed from irq work (or from panic()), possibly,
  * from other CPU, concurrently with printk_safe_log_store(). Should this
  * happen, printk_safe_log_store() will notice the buffer->len mismatch
  * and repeat the write.
@@ -398,8 +395,12 @@ void __init printk_safe_init(void)
 #endif
        }
 
-       /* Make sure that IRQ works are initialized before enabling. */
-       smp_wmb();
+       /*
+        * In the highly unlikely event that an NMI triggers at this
+        * moment, make sure the IRQ work is set up before this
+        * variable is set.
+        */
+       barrier();
        printk_safe_irq_ready = 1;
 
        /* Flush pending messages that did not have scheduled IRQ works. */
index bd30a973fe946b03916a1eeb873928adfe1b32b0..e4ced883d8de1ca9280fd18c43fafa4ce1c20e76 100644 (file)
@@ -104,6 +104,33 @@ int unregister_reboot_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_reboot_notifier);
 
+static void devm_unregister_reboot_notifier(struct device *dev, void *res)
+{
+       WARN_ON(unregister_reboot_notifier(*(struct notifier_block **)res));
+}
+
+int devm_register_reboot_notifier(struct device *dev, struct notifier_block *nb)
+{
+       struct notifier_block **rcnb;
+       int ret;
+
+       rcnb = devres_alloc(devm_unregister_reboot_notifier,
+                           sizeof(*rcnb), GFP_KERNEL);
+       if (!rcnb)
+               return -ENOMEM;
+
+       ret = register_reboot_notifier(nb);
+       if (!ret) {
+               *rcnb = nb;
+               devres_add(dev, rcnb);
+       } else {
+               devres_free(rcnb);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(devm_register_reboot_notifier);
+
 /*
  *     Notifier list for kernel code which wants to be called
  *     to restart the system.
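A minimal sketch of a driver consuming the new devm_register_reboot_notifier() from its probe path; the foo_* names and the platform-device context are hypothetical:

    #include <linux/notifier.h>
    #include <linux/platform_device.h>
    #include <linux/reboot.h>

    static int foo_reboot_notify(struct notifier_block *nb, unsigned long action,
                                 void *data)
    {
            /* quiesce the hardware before restart/halt/poweroff */
            return NOTIFY_DONE;
    }

    static struct notifier_block foo_reboot_nb = {
            .notifier_call = foo_reboot_notify,
    };

    static int foo_probe(struct platform_device *pdev)
    {
            /* unregistered automatically by devres when the device is unbound */
            return devm_register_reboot_notifier(&pdev->dev, &foo_reboot_nb);
    }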
index babb36d3d03911df511f55b25e68ff179c6cc9da..9558664bd9ecd21d5cee8a504084b2ee4a2d0a50 100644 (file)
@@ -78,7 +78,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
        handler = sig_handler(t, sig);
 
        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
-                       handler == SIG_DFL && !force)
+           handler == SIG_DFL && !(force && sig_kernel_only(sig)))
                return 1;
 
        return sig_handler_ignored(handler, sig);
@@ -94,13 +94,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;
 
-       if (!sig_task_ignored(t, sig, force))
-               return 0;
-
        /*
-        * Tracers may want to know about even ignored signals.
+        * Tracers may want to know about even ignored signals, except
+        * for SIGKILL, which cannot be reported anyway but can be
+        * ignored by a SIGNAL_UNKILLABLE task.
         */
-       return !t->ptrace;
+       if (t->ptrace && sig != SIGKILL)
+               return 0;
+
+       return sig_task_ignored(t, sig, force);
 }
 
 /*
@@ -929,9 +931,9 @@ static void complete_signal(int sig, struct task_struct *p, int group)
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
-           !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
+           !(signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
-           (sig == SIGKILL || !t->ptrace)) {
+           (sig == SIGKILL || !p->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
index 4a13a389e99b243a5e261d3b3a12b67e2e681c11..557d4672857793f746b9dfe0d8d1a0527bd2ac87 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/kexec.h>
 #include <linux/bpf.h>
 #include <linux/mount.h>
+#include <linux/pipe_fs_i.h>
 
 #include <linux/uaccess.h>
 #include <asm/processor.h>
@@ -1816,7 +1817,7 @@ static struct ctl_table fs_table[] = {
        {
                .procname       = "pipe-max-size",
                .data           = &pipe_max_size,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(pipe_max_size),
                .mode           = 0644,
                .proc_handler   = &pipe_proc_fn,
                .extra1         = &pipe_min_size,
@@ -2575,12 +2576,13 @@ static int do_proc_douintvec_minmax_conv(unsigned long *lvalp,
        if (write) {
                unsigned int val = *lvalp;
 
+               if (*lvalp > UINT_MAX)
+                       return -EINVAL;
+
                if ((param->min && *param->min > val) ||
                    (param->max && *param->max < val))
                        return -ERANGE;
 
-               if (*lvalp > UINT_MAX)
-                       return -EINVAL;
                *valp = val;
        } else {
                unsigned int val = *valp;
@@ -2620,6 +2622,48 @@ int proc_douintvec_minmax(struct ctl_table *table, int write,
                                 do_proc_douintvec_minmax_conv, &param);
 }
 
+struct do_proc_dopipe_max_size_conv_param {
+       unsigned int *min;
+};
+
+static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
+                                       unsigned int *valp,
+                                       int write, void *data)
+{
+       struct do_proc_dopipe_max_size_conv_param *param = data;
+
+       if (write) {
+               unsigned int val;
+
+               if (*lvalp > UINT_MAX)
+                       return -EINVAL;
+
+               val = round_pipe_size(*lvalp);
+               if (val == 0)
+                       return -EINVAL;
+
+               if (param->min && *param->min > val)
+                       return -ERANGE;
+
+               *valp = val;
+       } else {
+               unsigned int val = *valp;
+               *lvalp = (unsigned long) val;
+       }
+
+       return 0;
+}
+
+int proc_dopipe_max_size(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct do_proc_dopipe_max_size_conv_param param = {
+               .min = (unsigned int *) table->extra1,
+       };
+       return do_proc_douintvec(table, write, buffer, lenp, ppos,
+                                do_proc_dopipe_max_size_conv, &param);
+}
+
 static void validate_coredump_safety(void)
 {
 #ifdef CONFIG_COREDUMP
@@ -3083,14 +3127,12 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
                        else
                                bitmap_copy(bitmap, tmp_bitmap, bitmap_len);
                }
-               kfree(tmp_bitmap);
                *lenp -= left;
                *ppos += *lenp;
-               return 0;
-       } else {
-               kfree(tmp_bitmap);
-               return err;
        }
+
+       kfree(tmp_bitmap);
+       return err;
 }
 
 #else /* CONFIG_PROC_SYSCTL */
@@ -3125,6 +3167,12 @@ int proc_douintvec_minmax(struct ctl_table *table, int write,
        return -ENOSYS;
 }
 
+int proc_dopipe_max_size(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       return -ENOSYS;
+}
+
 int proc_dointvec_jiffies(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -3168,6 +3216,7 @@ EXPORT_SYMBOL(proc_douintvec);
 EXPORT_SYMBOL(proc_dointvec_jiffies);
 EXPORT_SYMBOL(proc_dointvec_minmax);
 EXPORT_SYMBOL_GPL(proc_douintvec_minmax);
+EXPORT_SYMBOL_GPL(proc_dopipe_max_size);
 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
 EXPORT_SYMBOL(proc_dostring);
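For illustration, a hypothetical ctl_table entry wired straight to the new proc_dopipe_max_size() helper; ->extra1 supplies the minimum that do_proc_dopipe_max_size_conv() enforces, and the example_* variables are assumptions rather than the real fs_table wiring:

    #include <linux/sysctl.h>

    static unsigned int example_pipe_min_size = 4096;      /* one page */
    static unsigned int example_pipe_max_size = 1048576;

    static struct ctl_table example_fs_table[] = {
            {
                    .procname       = "pipe-max-size",
                    .data           = &example_pipe_max_size,
                    .maxlen         = sizeof(example_pipe_max_size),
                    .mode           = 0644,
                    .proc_handler   = proc_dopipe_max_size,
                    .extra1         = &example_pipe_min_size,
            },
            { }
    };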
index d689a9557e170b9d89ddb7cb42e518a82d5851cb..e776fc8cc1df3bc6dd09e7722250f87f183fb5ef 100644 (file)
@@ -21,10 +21,6 @@ config CLOCKSOURCE_VALIDATE_LAST_CYCLE
 config GENERIC_TIME_VSYSCALL
        bool
 
-# Timekeeping vsyscall support
-config GENERIC_TIME_VSYSCALL_OLD
-       bool
-
 # Old style timekeeping
 config ARCH_USES_GETTIMEOFFSET
        bool
index 03918a19cf2da854bcefa9f8188daa53a7db82f4..65f9e3f24dde8bc8f908f4d68cda2fa4f39843ce 100644 (file)
@@ -171,7 +171,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
        spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
-static void clocksource_watchdog(unsigned long data)
+static void clocksource_watchdog(struct timer_list *unused)
 {
        struct clocksource *cs;
        u64 csnow, wdnow, cslast, wdlast, delta;
@@ -290,8 +290,7 @@ static inline void clocksource_start_watchdog(void)
 {
        if (watchdog_running || !watchdog || list_empty(&watchdog_list))
                return;
-       init_timer(&watchdog_timer);
-       watchdog_timer.function = clocksource_watchdog;
+       timer_setup(&watchdog_timer, clocksource_watchdog, 0);
        watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
        watchdog_running = 1;
index 198afa78bf69e425b8cc1b0dfed538467df12bf7..cd03317e7b57deaec813644758080f605490f446 100644 (file)
@@ -557,45 +557,6 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
-#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
-#warning Please contact your maintainers, as GENERIC_TIME_VSYSCALL_OLD compatibity will disappear soon.
-
-static inline void update_vsyscall(struct timekeeper *tk)
-{
-       struct timespec xt, wm;
-
-       xt = timespec64_to_timespec(tk_xtime(tk));
-       wm = timespec64_to_timespec(tk->wall_to_monotonic);
-       update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
-                           tk->tkr_mono.cycle_last);
-}
-
-static inline void old_vsyscall_fixup(struct timekeeper *tk)
-{
-       s64 remainder;
-
-       /*
-       * Store only full nanoseconds into xtime_nsec after rounding
-       * it up and add the remainder to the error difference.
-       * XXX - This is necessary to avoid small 1ns inconsistnecies caused
-       * by truncating the remainder in vsyscalls. However, it causes
-       * additional work to be done in timekeeping_adjust(). Once
-       * the vsyscall implementations are converted to use xtime_nsec
-       * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
-       * users are removed, this can be killed.
-       */
-       remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
-       if (remainder != 0) {
-               tk->tkr_mono.xtime_nsec -= remainder;
-               tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
-               tk->ntp_error += remainder << tk->ntp_error_shift;
-               tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
-       }
-}
-#else
-#define old_vsyscall_fixup(tk)
-#endif
-
 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 
 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
@@ -2163,12 +2124,6 @@ void update_wall_time(void)
        /* correct the clock when NTP error is too big */
        timekeeping_adjust(tk, offset);
 
-       /*
-        * XXX This can be killed once everyone converts
-        * to the new update_vsyscall.
-        */
-       old_vsyscall_fixup(tk);
-
        /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
index af0b8bae45027042ff153172522b659fb605cf1d..ffebcf878fba5d5cf67f5e9abcece25c16259919 100644 (file)
@@ -707,14 +707,18 @@ static inline void debug_timer_assert_init(struct timer_list *timer)
        debug_object_assert_init(timer, &timer_debug_descr);
 }
 
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
+static void do_init_timer(struct timer_list *timer,
+                         void (*func)(struct timer_list *),
+                         unsigned int flags,
                          const char *name, struct lock_class_key *key);
 
-void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
+void init_timer_on_stack_key(struct timer_list *timer,
+                            void (*func)(struct timer_list *),
+                            unsigned int flags,
                             const char *name, struct lock_class_key *key)
 {
        debug_object_init_on_stack(timer, &timer_debug_descr);
-       do_init_timer(timer, flags, name, key);
+       do_init_timer(timer, func, flags, name, key);
 }
 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 
@@ -755,10 +759,13 @@ static inline void debug_assert_init(struct timer_list *timer)
        debug_timer_assert_init(timer);
 }
 
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
+static void do_init_timer(struct timer_list *timer,
+                         void (*func)(struct timer_list *),
+                         unsigned int flags,
                          const char *name, struct lock_class_key *key)
 {
        timer->entry.pprev = NULL;
+       timer->function = func;
        timer->flags = flags | raw_smp_processor_id();
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
@@ -766,6 +773,7 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
 /**
  * init_timer_key - initialize a timer
  * @timer: the timer to be initialized
+ * @func: timer callback function
  * @flags: timer flags
  * @name: name of the timer
  * @key: lockdep class key of the fake lock used for tracking timer
@@ -774,11 +782,12 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
  * init_timer_key() must be done to a timer prior calling *any* of the
  * other timer functions.
  */
-void init_timer_key(struct timer_list *timer, unsigned int flags,
+void init_timer_key(struct timer_list *timer,
+                   void (*func)(struct timer_list *), unsigned int flags,
                    const char *name, struct lock_class_key *key)
 {
        debug_init(timer);
-       do_init_timer(timer, flags, name, key);
+       do_init_timer(timer, func, flags, name, key);
 }
 EXPORT_SYMBOL(init_timer_key);
 
@@ -1107,12 +1116,12 @@ EXPORT_SYMBOL(timer_reduce);
  * add_timer - start a timer
  * @timer: the timer to be added
  *
- * The kernel will do a ->function(->data) callback from the
+ * The kernel will do a ->function(@timer) callback from the
  * timer interrupt at the ->expires point in the future. The
  * current time is 'jiffies'.
  *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
+ * The timer's ->expires, ->function fields must be set prior calling this
+ * function.
  *
  * Timers with an ->expires field in the past will be executed in the next
  * timer tick.
@@ -1284,8 +1293,7 @@ int del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
-                         unsigned long data)
+static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list *))
 {
        int count = preempt_count();
 
@@ -1309,7 +1317,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
        lock_map_acquire(&lockdep_map);
 
        trace_timer_expire_entry(timer);
-       fn(data);
+       fn(timer);
        trace_timer_expire_exit(timer);
 
        lock_map_release(&lockdep_map);
@@ -1331,8 +1339,7 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 {
        while (!hlist_empty(head)) {
                struct timer_list *timer;
-               void (*fn)(unsigned long);
-               unsigned long data;
+               void (*fn)(struct timer_list *);
 
                timer = hlist_entry(head->first, struct timer_list, entry);
 
@@ -1340,15 +1347,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
                detach_timer(timer, true);
 
                fn = timer->function;
-               data = timer->data;
 
                if (timer->flags & TIMER_IRQSAFE) {
                        raw_spin_unlock(&base->lock);
-                       call_timer_fn(timer, fn, data);
+                       call_timer_fn(timer, fn);
                        raw_spin_lock(&base->lock);
                } else {
                        raw_spin_unlock_irq(&base->lock);
-                       call_timer_fn(timer, fn, data);
+                       call_timer_fn(timer, fn);
                        raw_spin_lock_irq(&base->lock);
                }
        }
index 0e7f5428a1484ed85215a72e115759eace1392fb..0ed768b56c6061c9da5669649a15590e6f720264 100644 (file)
@@ -389,7 +389,7 @@ static int __init init_timer_list_procfs(void)
 {
        struct proc_dir_entry *pe;
 
-       pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+       pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
        if (!pe)
                return -ENOMEM;
        return 0;
index f54b7b6b4a4bcbe50b207a2a40d8d64bf5acd12c..af7dad126c13cecbe73f5d797778f005b5838377 100644 (file)
@@ -160,6 +160,17 @@ config FUNCTION_GRAPH_TRACER
          address on the current task structure into a stack of calls.
 
 
+config PREEMPTIRQ_EVENTS
+       bool "Enable trace events for preempt and irq disable/enable"
+       select TRACE_IRQFLAGS
+       depends on DEBUG_PREEMPT || !PROVE_LOCKING
+       default n
+       help
+         Enable tracing of disable and enable events for preemption and irqs.
+         For tracing preempt disable/enable events, DEBUG_PREEMPT must be
+         enabled. For tracing irq disable/enable events, PROVE_LOCKING must
+         be disabled.
+
 config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
        default n
index 19a15b2f119010f5dd38d2dfe99ff7d2eb81a5f0..e2538c7638d44d635542382a11df6d803d6af1c5 100644 (file)
@@ -35,6 +35,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
index a5580c6708668033313e9c5185f2122d364ab72f..27d1f4ffa3def946525b2d248757fac3620504e5 100644 (file)
@@ -78,16 +78,12 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
 
 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 {
-       int ret = 0;
-
-       if (unlikely(size == 0))
-               goto out;
+       int ret;
 
        ret = probe_kernel_read(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);
 
- out:
        return ret;
 }
 
@@ -407,7 +403,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
-       .arg5_type      = ARG_CONST_SIZE,
+       .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 };
 
 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
@@ -498,7 +494,7 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = {
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
-       .arg2_type      = ARG_CONST_SIZE,
+       .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
 };
 
@@ -609,7 +605,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
-       .arg5_type      = ARG_CONST_SIZE,
+       .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 };
 
 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
index 8319e09e15b945f14f9046edeb885e173ef26652..ccdf3664e4a9a7f6a79423dec807cd9f0c3ecd6f 100644 (file)
@@ -203,30 +203,6 @@ void clear_ftrace_function(void)
        ftrace_trace_function = ftrace_stub;
 }
 
-static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               *per_cpu_ptr(ops->disabled, cpu) = 1;
-}
-
-static int per_cpu_ops_alloc(struct ftrace_ops *ops)
-{
-       int __percpu *disabled;
-
-       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-               return -EINVAL;
-
-       disabled = alloc_percpu(int);
-       if (!disabled)
-               return -ENOMEM;
-
-       ops->disabled = disabled;
-       per_cpu_ops_disable_all(ops);
-       return 0;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
        /*
@@ -262,8 +238,8 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
         * If this is a dynamic, RCU, or per CPU ops, or we force list func,
         * then it needs to call the list anyway.
         */
-       if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
-                         FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
+       if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
+           FTRACE_FORCE_LIST_FUNC)
                return ftrace_ops_list_func;
 
        return ftrace_ops_get_func(ops);
@@ -422,11 +398,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-       if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
-               if (per_cpu_ops_alloc(ops))
-                       return -ENOMEM;
-       }
-
        add_ftrace_ops(&ftrace_ops_list, ops);
 
        /* Always save the function, and reset at unregistering */
@@ -2727,11 +2698,6 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 {
 }
 
-static void per_cpu_ops_free(struct ftrace_ops *ops)
-{
-       free_percpu(ops->disabled);
-}
-
 static void ftrace_startup_enable(int command)
 {
        if (saved_ftrace_func != ftrace_trace_function) {
@@ -2833,7 +2799,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
                 * not currently active, we can just free them
                 * without synchronizing all CPUs.
                 */
-               if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+               if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                        goto free_ops;
 
                return 0;
@@ -2880,7 +2846,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
         * The same goes for freeing the per_cpu data of the per_cpu
         * ops.
         */
-       if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
+       if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
                /*
                 * We need to do a hard force of sched synchronization.
                 * This is because we use preempt_disable() to do RCU, but
@@ -2903,9 +2869,6 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
  free_ops:
                arch_ftrace_trampoline_free(ops);
-
-               if (ops->flags & FTRACE_OPS_FL_PER_CPU)
-                       per_cpu_ops_free(ops);
        }
 
        return 0;
@@ -5672,10 +5635,29 @@ static int ftrace_process_locs(struct module *mod,
        return ret;
 }
 
+struct ftrace_mod_func {
+       struct list_head        list;
+       char                    *name;
+       unsigned long           ip;
+       unsigned int            size;
+};
+
+struct ftrace_mod_map {
+       struct rcu_head         rcu;
+       struct list_head        list;
+       struct module           *mod;
+       unsigned long           start_addr;
+       unsigned long           end_addr;
+       struct list_head        funcs;
+       unsigned int            num_funcs;
+};
+
 #ifdef CONFIG_MODULES
 
 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
 
+static LIST_HEAD(ftrace_mod_maps);
+
 static int referenced_filters(struct dyn_ftrace *rec)
 {
        struct ftrace_ops *ops;
@@ -5729,8 +5711,26 @@ static void clear_mod_from_hashes(struct ftrace_page *pg)
        mutex_unlock(&trace_types_lock);
 }
 
+static void ftrace_free_mod_map(struct rcu_head *rcu)
+{
+       struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
+       struct ftrace_mod_func *mod_func;
+       struct ftrace_mod_func *n;
+
+       /* All the contents of mod_map are now not visible to readers */
+       list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
+               kfree(mod_func->name);
+               list_del(&mod_func->list);
+               kfree(mod_func);
+       }
+
+       kfree(mod_map);
+}
+
 void ftrace_release_mod(struct module *mod)
 {
+       struct ftrace_mod_map *mod_map;
+       struct ftrace_mod_map *n;
        struct dyn_ftrace *rec;
        struct ftrace_page **last_pg;
        struct ftrace_page *tmp_page = NULL;
@@ -5742,6 +5742,14 @@ void ftrace_release_mod(struct module *mod)
        if (ftrace_disabled)
                goto out_unlock;
 
+       list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
+               if (mod_map->mod == mod) {
+                       list_del_rcu(&mod_map->list);
+                       call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+                       break;
+               }
+       }
+
        /*
         * Each module has its own ftrace_pages, remove
         * them from the list.
@@ -5749,7 +5757,8 @@ void ftrace_release_mod(struct module *mod)
        last_pg = &ftrace_pages_start;
        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
                rec = &pg->records[0];
-               if (within_module_core(rec->ip, mod)) {
+               if (within_module_core(rec->ip, mod) ||
+                   within_module_init(rec->ip, mod)) {
                        /*
                         * As core pages are first, the first
                         * page should never be a module page.
@@ -5818,7 +5827,8 @@ void ftrace_module_enable(struct module *mod)
                 * not part of this module, then skip this pg,
                 * which the "break" will do.
                 */
-               if (!within_module_core(rec->ip, mod))
+               if (!within_module_core(rec->ip, mod) &&
+                   !within_module_init(rec->ip, mod))
                        break;
 
                cnt = 0;
@@ -5863,23 +5873,245 @@ void ftrace_module_init(struct module *mod)
        ftrace_process_locs(mod, mod->ftrace_callsites,
                            mod->ftrace_callsites + mod->num_ftrace_callsites);
 }
+
+static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
+                               struct dyn_ftrace *rec)
+{
+       struct ftrace_mod_func *mod_func;
+       unsigned long symsize;
+       unsigned long offset;
+       char str[KSYM_SYMBOL_LEN];
+       char *modname;
+       const char *ret;
+
+       ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
+       if (!ret)
+               return;
+
+       mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
+       if (!mod_func)
+               return;
+
+       mod_func->name = kstrdup(str, GFP_KERNEL);
+       if (!mod_func->name) {
+               kfree(mod_func);
+               return;
+       }
+
+       mod_func->ip = rec->ip - offset;
+       mod_func->size = symsize;
+
+       mod_map->num_funcs++;
+
+       list_add_rcu(&mod_func->list, &mod_map->funcs);
+}
+
+static struct ftrace_mod_map *
+allocate_ftrace_mod_map(struct module *mod,
+                       unsigned long start, unsigned long end)
+{
+       struct ftrace_mod_map *mod_map;
+
+       mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
+       if (!mod_map)
+               return NULL;
+
+       mod_map->mod = mod;
+       mod_map->start_addr = start;
+       mod_map->end_addr = end;
+       mod_map->num_funcs = 0;
+
+       INIT_LIST_HEAD_RCU(&mod_map->funcs);
+
+       list_add_rcu(&mod_map->list, &ftrace_mod_maps);
+
+       return mod_map;
+}
+
+static const char *
+ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
+                          unsigned long addr, unsigned long *size,
+                          unsigned long *off, char *sym)
+{
+       struct ftrace_mod_func *found_func =  NULL;
+       struct ftrace_mod_func *mod_func;
+
+       list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
+               if (addr >= mod_func->ip &&
+                   addr < mod_func->ip + mod_func->size) {
+                       found_func = mod_func;
+                       break;
+               }
+       }
+
+       if (found_func) {
+               if (size)
+                       *size = found_func->size;
+               if (off)
+                       *off = addr - found_func->ip;
+               if (sym)
+                       strlcpy(sym, found_func->name, KSYM_NAME_LEN);
+
+               return found_func->name;
+       }
+
+       return NULL;
+}
+
+const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+                  unsigned long *off, char **modname, char *sym)
+{
+       struct ftrace_mod_map *mod_map;
+       const char *ret = NULL;
+
+       /* mod_map is freed via call_rcu_sched() */
+       preempt_disable();
+       list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
+               ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
+               if (ret) {
+                       if (modname)
+                               *modname = mod_map->mod->name;
+                       break;
+               }
+       }
+       preempt_enable();
+
+       return ret;
+}
+
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+                          char *type, char *name,
+                          char *module_name, int *exported)
+{
+       struct ftrace_mod_map *mod_map;
+       struct ftrace_mod_func *mod_func;
+
+       preempt_disable();
+       list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
+
+               if (symnum >= mod_map->num_funcs) {
+                       symnum -= mod_map->num_funcs;
+                       continue;
+               }
+
+               list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
+                       if (symnum > 1) {
+                               symnum--;
+                               continue;
+                       }
+
+                       *value = mod_func->ip;
+                       *type = 'T';
+                       strlcpy(name, mod_func->name, KSYM_NAME_LEN);
+                       strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
+                       *exported = 1;
+                       preempt_enable();
+                       return 0;
+               }
+               WARN_ON(1);
+               break;
+       }
+       preempt_enable();
+       return -ERANGE;
+}
+
+#else
+static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
+                               struct dyn_ftrace *rec) { }
+static inline struct ftrace_mod_map *
+allocate_ftrace_mod_map(struct module *mod,
+                       unsigned long start, unsigned long end)
+{
+       return NULL;
+}
 #endif /* CONFIG_MODULES */
 
-void __init ftrace_free_init_mem(void)
+struct ftrace_init_func {
+       struct list_head list;
+       unsigned long ip;
+};
+
+/* Clear any init ips from hashes */
+static void
+clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
+{
+       struct ftrace_func_entry *entry;
+
+       if (ftrace_hash_empty(hash))
+               return;
+
+       entry = __ftrace_lookup_ip(hash, func->ip);
+
+       /*
+        * Do not allow this rec to match again.
+        * Yeah, it may waste some memory, but will be removed
+        * if/when the hash is modified again.
+        */
+       if (entry)
+               entry->ip = 0;
+}
+
+static void
+clear_func_from_hashes(struct ftrace_init_func *func)
+{
+       struct trace_array *tr;
+
+       mutex_lock(&trace_types_lock);
+       list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+               if (!tr->ops || !tr->ops->func_hash)
+                       continue;
+               mutex_lock(&tr->ops->func_hash->regex_lock);
+               clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
+               clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
+               mutex_unlock(&tr->ops->func_hash->regex_lock);
+       }
+       mutex_unlock(&trace_types_lock);
+}
+
+static void add_to_clear_hash_list(struct list_head *clear_list,
+                                  struct dyn_ftrace *rec)
+{
+       struct ftrace_init_func *func;
+
+       func = kmalloc(sizeof(*func), GFP_KERNEL);
+       if (!func) {
+               WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
+               return;
+       }
+
+       func->ip = rec->ip;
+       list_add(&func->list, clear_list);
+}
+
+void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 {
-       unsigned long start = (unsigned long)(&__init_begin);
-       unsigned long end = (unsigned long)(&__init_end);
+       unsigned long start = (unsigned long)(start_ptr);
+       unsigned long end = (unsigned long)(end_ptr);
        struct ftrace_page **last_pg = &ftrace_pages_start;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;
+       struct ftrace_mod_map *mod_map = NULL;
+       struct ftrace_init_func *func, *func_next;
+       struct list_head clear_hash;
        int order;
 
+       INIT_LIST_HEAD(&clear_hash);
+
        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */
 
        mutex_lock(&ftrace_lock);
 
+       /*
+        * If we are freeing module init memory, then check if
+        * any tracer is active. If so, we need to save a mapping of
+        * the module functions being freed with the address.
+        */
+       if (mod && ftrace_ops_list != &ftrace_list_end)
+               mod_map = allocate_ftrace_mod_map(mod, start, end);
+
        for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
@@ -5890,6 +6122,13 @@ void __init ftrace_free_init_mem(void)
                              ftrace_cmp_recs);
                if (!rec)
                        continue;
+
+               /* rec will be cleared from hashes after ftrace_lock unlock */
+               add_to_clear_hash_list(&clear_hash, rec);
+
+               if (mod_map)
+                       save_ftrace_mod_rec(mod_map, rec);
+
                pg->index--;
                ftrace_update_tot_cnt--;
                if (!pg->index) {
@@ -5908,6 +6147,19 @@ void __init ftrace_free_init_mem(void)
                goto again;
        }
        mutex_unlock(&ftrace_lock);
+
+       list_for_each_entry_safe(func, func_next, &clear_hash, list) {
+               clear_func_from_hashes(func);
+               kfree(func);
+       }
+}
+
+void __init ftrace_free_init_mem(void)
+{
+       void *start = (void *)(&__init_begin);
+       void *end = (void *)(&__init_end);
+
+       ftrace_free_mem(NULL, start, end);
 }
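
The hunks above split ftrace_free_init_mem() into a generic ftrace_free_mem() and defer the hash cleanup: while ftrace_lock is held, the freed ips are only collected on a local list; the per-tracer filter/notrace hashes, which take their own locks, are scrubbed only after the lock is dropped. A rough user-space model of that two-phase pattern follows (the names to_clear and scrub_ip and the toy hash array are invented for illustration, not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct to_clear {
	unsigned long ip;
	struct to_clear *next;
};

static pthread_mutex_t main_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long hash[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

static void scrub_ip(unsigned long ip)
{
	pthread_mutex_lock(&hash_lock);
	for (int i = 0; i < 4; i++)
		if (hash[i] == ip)
			hash[i] = 0;	/* never match this ip again */
	pthread_mutex_unlock(&hash_lock);
}

int main(void)
{
	struct to_clear *list = NULL, *p, *n;
	unsigned long freed[] = { 0x2000, 0x4000 };

	/* phase 1: under the main lock, only record which ips are going away */
	pthread_mutex_lock(&main_lock);
	for (int i = 0; i < 2; i++) {
		p = malloc(sizeof(*p));
		p->ip = freed[i];
		p->next = list;
		list = p;
	}
	pthread_mutex_unlock(&main_lock);

	/* phase 2: with the main lock dropped, take the hash lock per entry */
	for (p = list; p; p = n) {
		n = p->next;
		scrub_ip(p->ip);
		free(p);
	}

	for (int i = 0; i < 4; i++)
		printf("%lx\n", hash[i]);	/* prints 1000 0 3000 0 */
	return 0;
}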
 
 void __init ftrace_init(void)
@@ -6063,10 +6315,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                 * If any of the above fails then the op->func() is not executed.
                 */
                if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
-                   (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
-                    !ftrace_function_local_disabled(op)) &&
                    ftrace_ops_test(op, ip, regs)) {
-                   
                        if (FTRACE_WARN_ON(!op->func)) {
                                pr_warn("op=%p %pS\n", op, op);
                                goto out;
@@ -6124,10 +6373,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 
        preempt_disable_notrace();
 
-       if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
-           !ftrace_function_local_disabled(op)) {
-               op->func(ip, parent_ip, op, regs);
-       }
+       op->func(ip, parent_ip, op, regs);
 
        preempt_enable_notrace();
        trace_clear_recursion(bit);
@@ -6151,7 +6397,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
         * or does per cpu logic, then we need to call the assist handler.
         */
        if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
-           ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
+           ops->flags & FTRACE_OPS_FL_RCU)
                return ftrace_ops_assist_func;
 
        return ops->func;
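
ftrace_mod_address_lookup() above resolves an address that now points into freed module init text by walking the saved {ip, size, name} records. A minimal user-space sketch of the same range walk (the addresses, names and the saved_func type are made up; the real code also reports the module name and relies on RCU):

#include <stdio.h>
#include <stddef.h>

struct saved_func {
	const char *name;
	unsigned long ip;	/* start address of the (now freed) function */
	unsigned long size;	/* symbol size in bytes */
};

static const char *lookup(const struct saved_func *funcs, size_t nr,
			  unsigned long addr, unsigned long *off)
{
	for (size_t i = 0; i < nr; i++) {
		if (addr >= funcs[i].ip && addr < funcs[i].ip + funcs[i].size) {
			*off = addr - funcs[i].ip;
			return funcs[i].name;
		}
	}
	return NULL;
}

int main(void)
{
	const struct saved_func funcs[] = {
		{ "my_mod_init",  0x1000, 0x80 },
		{ "my_mod_setup", 0x1080, 0x40 },
	};
	unsigned long off;
	const char *name = lookup(funcs, 2, 0x10a4, &off);

	if (name)
		printf("%s+0x%lx\n", name, off);	/* my_mod_setup+0x24 */
	return 0;
}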
index d57fede84b3803c15bfa4eb0324bbea50f4bdd10..91874a95060de5de11aa47d3fbddb8c4980a0da8 100644 (file)
@@ -2536,61 +2536,29 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * The lock and unlock are done within a preempt disable section.
  * The current_context per_cpu variable can only be modified
  * by the current task between lock and unlock. But it can
- * be modified more than once via an interrupt. To pass this
- * information from the lock to the unlock without having to
- * access the 'in_interrupt()' functions again (which do show
- * a bit of overhead in something as critical as function tracing,
- * we use a bitmask trick.
+ * be modified more than once via an interrupt. There are four
+ * different contexts that we need to consider.
  *
- *  bit 0 =  NMI context
- *  bit 1 =  IRQ context
- *  bit 2 =  SoftIRQ context
- *  bit 3 =  normal context.
+ *  Normal context.
+ *  SoftIRQ context
+ *  IRQ context
+ *  NMI context
  *
- * This works because this is the order of contexts that can
- * preempt other contexts. A SoftIRQ never preempts an IRQ
- * context.
- *
- * When the context is determined, the corresponding bit is
- * checked and set (if it was set, then a recursion of that context
- * happened).
- *
- * On unlock, we need to clear this bit. To do so, just subtract
- * 1 from the current_context and AND it to itself.
- *
- * (binary)
- *  101 - 1 = 100
- *  101 & 100 = 100 (clearing bit zero)
- *
- *  1010 - 1 = 1001
- *  1010 & 1001 = 1000 (clearing bit 1)
- *
- * The least significant bit can be cleared this way, and it
- * just so happens that it is the same bit corresponding to
- * the current context.
+ * If for some reason the ring buffer starts to recurse, we
+ * only allow that to happen at most 4 times (one for each
+ * context). If it happens 5 times, then we consider this a
+ * recursive loop and do not let it go further.
  */
 
 static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       unsigned int val = cpu_buffer->current_context;
-       int bit;
-
-       if (in_interrupt()) {
-               if (in_nmi())
-                       bit = RB_CTX_NMI;
-               else if (in_irq())
-                       bit = RB_CTX_IRQ;
-               else
-                       bit = RB_CTX_SOFTIRQ;
-       } else
-               bit = RB_CTX_NORMAL;
-
-       if (unlikely(val & (1 << bit)))
+       if (cpu_buffer->current_context >= 4)
                return 1;
 
-       val |= (1 << bit);
-       cpu_buffer->current_context = val;
+       cpu_buffer->current_context++;
+       /* Interrupts must see this update */
+       barrier();
 
        return 0;
 }
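
The rewritten trace_recursive_lock() replaces the per-context bitmask with a plain nesting counter: normal, softirq, irq and NMI context may each nest once, so a depth of four is the legal maximum and anything deeper is refused. A toy single-buffer model of that counter (user-space, with a compiler barrier standing in for the kernel's barrier()):

#include <stdio.h>

#define RB_MAX_NESTING 4	/* normal, softirq, irq, NMI */

struct cpu_buffer {
	unsigned int current_context;
};

static int recursive_lock(struct cpu_buffer *b)
{
	if (b->current_context >= RB_MAX_NESTING)
		return 1;			/* recursion detected, refuse */
	b->current_context++;
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
	return 0;
}

static void recursive_unlock(struct cpu_buffer *b)
{
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
	b->current_context--;
}

int main(void)
{
	struct cpu_buffer b = { 0 };

	for (int depth = 0; depth < 6; depth++)
		printf("lock %d -> %s\n", depth,
		       recursive_lock(&b) ? "rejected" : "taken");
	while (b.current_context)
		recursive_unlock(&b);
	return 0;
}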
@@ -2598,7 +2566,9 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+       /* Don't let the dec leak out */
+       barrier();
+       cpu_buffer->current_context--;
 }
 
 /**
index 752e5daf0896fc529876f8801a95e23118713338..73e67b68c53b47d5b422970cd0dee1d0bec27002 100644 (file)
@@ -7687,6 +7687,7 @@ static int instance_mkdir(const char *name)
        struct trace_array *tr;
        int ret;
 
+       mutex_lock(&event_mutex);
        mutex_lock(&trace_types_lock);
 
        ret = -EEXIST;
@@ -7742,6 +7743,7 @@ static int instance_mkdir(const char *name)
        list_add(&tr->list, &ftrace_trace_arrays);
 
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return 0;
 
@@ -7753,6 +7755,7 @@ static int instance_mkdir(const char *name)
 
  out_unlock:
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return ret;
 
@@ -7765,6 +7768,7 @@ static int instance_rmdir(const char *name)
        int ret;
        int i;
 
+       mutex_lock(&event_mutex);
        mutex_lock(&trace_types_lock);
 
        ret = -ENODEV;
@@ -7810,6 +7814,7 @@ static int instance_rmdir(const char *name)
 
  out_unlock:
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return ret;
 }
@@ -8276,6 +8281,92 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 }
 EXPORT_SYMBOL_GPL(ftrace_dump);
 
+int trace_run_command(const char *buf, int (*createfn)(int, char **))
+{
+       char **argv;
+       int argc, ret;
+
+       argc = 0;
+       ret = 0;
+       argv = argv_split(GFP_KERNEL, buf, &argc);
+       if (!argv)
+               return -ENOMEM;
+
+       if (argc)
+               ret = createfn(argc, argv);
+
+       argv_free(argv);
+
+       return ret;
+}
+
+#define WRITE_BUFSIZE  4096
+
+ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
+                               size_t count, loff_t *ppos,
+                               int (*createfn)(int, char **))
+{
+       char *kbuf, *buf, *tmp;
+       int ret = 0;
+       size_t done = 0;
+       size_t size;
+
+       kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
+       if (!kbuf)
+               return -ENOMEM;
+
+       while (done < count) {
+               size = count - done;
+
+               if (size >= WRITE_BUFSIZE)
+                       size = WRITE_BUFSIZE - 1;
+
+               if (copy_from_user(kbuf, buffer + done, size)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+               kbuf[size] = '\0';
+               buf = kbuf;
+               do {
+                       tmp = strchr(buf, '\n');
+                       if (tmp) {
+                               *tmp = '\0';
+                               size = tmp - buf + 1;
+                       } else {
+                               size = strlen(buf);
+                               if (done + size < count) {
+                                       if (buf != kbuf)
+                                               break;
+                                       /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
+                                       pr_warn("Line length is too long: Should be less than %d\n",
+                                               WRITE_BUFSIZE - 2);
+                                       ret = -EINVAL;
+                                       goto out;
+                               }
+                       }
+                       done += size;
+
+                       /* Remove comments */
+                       tmp = strchr(buf, '#');
+
+                       if (tmp)
+                               *tmp = '\0';
+
+                       ret = trace_run_command(buf, createfn);
+                       if (ret)
+                               goto out;
+                       buf += size;
+
+               } while (done < count);
+       }
+       ret = done;
+
+out:
+       kfree(kbuf);
+
+       return ret;
+}
+
 __init static int tracer_alloc_buffers(void)
 {
        int ring_buf_size;
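
trace_parse_run_command() gives its callers a simple contract: the user buffer is consumed one newline-terminated line at a time, anything after '#' is dropped as a comment, and each surviving line is tokenized and handed to createfn(argc, argv). A user-space sketch of that contract (the probe strings and the 16-token limit are illustrative, not the kernel implementation):

#include <stdio.h>
#include <string.h>

static int createfn(int argc, char **argv)
{
	printf("create:");
	for (int i = 0; i < argc; i++)
		printf(" [%s]", argv[i]);
	printf("\n");
	return 0;
}

int main(void)
{
	char buf[] = "p:myprobe do_sys_open dfd=%di  # comment\n"
		     "r:myretprobe do_sys_open $retval\n";
	char *save, *line = strtok_r(buf, "\n", &save);

	while (line) {
		char *hash = strchr(line, '#');
		if (hash)
			*hash = '\0';		/* strip trailing comment */

		char *argv[16], *asave;
		int argc = 0;
		for (char *tok = strtok_r(line, " \t", &asave);
		     tok && argc < 16; tok = strtok_r(NULL, " \t", &asave))
			argv[argc++] = tok;
		if (argc)
			createfn(argc, argv);	/* one command per line */

		line = strtok_r(NULL, "\n", &save);
	}
	return 0;
}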
index 6b0b343a36a278be32a89e91e95066e84020c620..2a6d0325a76181a0a8b309eaa2b69f91b350d98a 100644 (file)
@@ -739,8 +739,6 @@ extern int trace_selftest_startup_wakeup(struct tracer *trace,
                                         struct trace_array *tr);
 extern int trace_selftest_startup_nop(struct tracer *trace,
                                         struct trace_array *tr);
-extern int trace_selftest_startup_sched_switch(struct tracer *trace,
-                                              struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
                                         struct trace_array *tr);
 /*
@@ -1755,6 +1753,13 @@ void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
 
+#define MAX_EVENT_NAME_LEN     64
+
+extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
+extern ssize_t trace_parse_run_command(struct file *file,
+               const char __user *buffer, size_t count, loff_t *ppos,
+               int (*createfn)(int, char**));
+
 /*
  * Normal trace_printk() and friends allocates special buffers
  * to do the manipulation, as well as saves the print formats
index 13ba2d3f6a91a147c8377041ee387ff457ab21c2..55d6dff37dafad5732da6adf85cddc38a4bb43bc 100644 (file)
@@ -240,27 +240,41 @@ void perf_trace_destroy(struct perf_event *p_event)
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
        struct trace_event_call *tp_event = p_event->tp_event;
-       struct hlist_head __percpu *pcpu_list;
-       struct hlist_head *list;
-
-       pcpu_list = tp_event->perf_events;
-       if (WARN_ON_ONCE(!pcpu_list))
-               return -EINVAL;
 
        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;
 
-       list = this_cpu_ptr(pcpu_list);
-       hlist_add_head_rcu(&p_event->hlist_entry, list);
+       /*
+        * If TRACE_REG_PERF_ADD returns false, no custom action was performed
+        * and we need to take the default action of enqueueing our event on
+        * the right per-cpu hlist.
+        */
+       if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
+               struct hlist_head __percpu *pcpu_list;
+               struct hlist_head *list;
+
+               pcpu_list = tp_event->perf_events;
+               if (WARN_ON_ONCE(!pcpu_list))
+                       return -EINVAL;
+
+               list = this_cpu_ptr(pcpu_list);
+               hlist_add_head_rcu(&p_event->hlist_entry, list);
+       }
 
-       return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
+       return 0;
 }
 
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
        struct trace_event_call *tp_event = p_event->tp_event;
-       hlist_del_rcu(&p_event->hlist_entry);
-       tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
+
+       /*
+        * If TRACE_REG_PERF_DEL returns false, no custom action was performed
+        * and we need to take the default action of dequeueing our event from
+        * the right per-cpu hlist.
+        */
+       if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
+               hlist_del_rcu(&p_event->hlist_entry);
 }
 
 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
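
The new convention above lets class->reg(..., TRACE_REG_PERF_ADD/DEL, ...) return nonzero to signal that it performed its own add/del, in which case the generic code skips the default per-cpu hlist handling. A compressed sketch of that callback contract (toy_event, toy_reg and friends are hypothetical, not the trace_event_call API):

#include <stdio.h>
#include <stdbool.h>

struct toy_event {
	bool custom;	/* does this event type manage its own list? */
	bool on_list;
};

/* returns nonzero when the callback performed a custom action itself */
static int toy_reg(struct toy_event *ev, const char *op)
{
	if (!ev->custom)
		return 0;
	printf("%s handled by the event itself\n", op);
	return 1;
}

static void toy_add(struct toy_event *ev)
{
	if (!toy_reg(ev, "ADD")) {
		ev->on_list = true;	/* default: enqueue on the shared list */
		printf("ADD via default per-cpu list\n");
	}
}

static void toy_del(struct toy_event *ev)
{
	if (!toy_reg(ev, "DEL")) {
		ev->on_list = false;	/* default: dequeue from the shared list */
		printf("DEL via default per-cpu list\n");
	}
}

int main(void)
{
	struct toy_event plain = { .custom = false };
	struct toy_event ftrace_like = { .custom = true };

	toy_add(&plain);        toy_del(&plain);
	toy_add(&ftrace_like);  toy_del(&ftrace_like);
	return 0;
}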
@@ -306,16 +320,25 @@ static void
 perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
 {
-       struct perf_event *event;
        struct ftrace_entry *entry;
-       struct hlist_head *head;
+       struct perf_event *event;
+       struct hlist_head head;
        struct pt_regs regs;
        int rctx;
 
-       head = this_cpu_ptr(event_function.perf_events);
-       if (hlist_empty(head))
+       if ((unsigned long)ops->private != smp_processor_id())
                return;
 
+       event = container_of(ops, struct perf_event, ftrace_ops);
+
+       /*
+        * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
+        * the perf code does is hlist_for_each_entry_rcu(), so we can
+        * get away with simply setting the @head.first pointer in order
+        * to create a singular list.
+        */
+       head.first = &event->hlist_entry;
+
 #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))
 
@@ -330,9 +353,8 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 
        entry->ip = ip;
        entry->parent_ip = parent_ip;
-       event = container_of(ops, struct perf_event, ftrace_ops);
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
-                             1, &regs, head, NULL, event);
+                             1, &regs, &head, NULL);
 
 #undef ENTRY_SIZE
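
The comment above leans on the fact that the perf submission path only iterates the hlist it is handed, so a one-element list can be faked by pointing head.first at the event's own node. A stand-alone sketch of that trick with a simplified, non-RCU copy of the hlist layout:

#include <stdio.h>
#include <stddef.h>

/* same shape as the kernel's struct hlist_head / hlist_node */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

struct event {
	int id;
	struct hlist_node node;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct event ev = { .id = 42, .node = { NULL, NULL } };
	struct hlist_head head;

	/* build a singular list: no locking, no list_add, just one pointer */
	head.first = &ev.node;

	for (struct hlist_node *n = head.first; n; n = n->next) {
		struct event *e = container_of(n, struct event, node);
		printf("visited event %d\n", e->id);
	}
	return 0;
}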
 }
@@ -341,8 +363,10 @@ static int perf_ftrace_function_register(struct perf_event *event)
 {
        struct ftrace_ops *ops = &event->ftrace_ops;
 
-       ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
-       ops->func = perf_ftrace_function_call;
+       ops->flags   = FTRACE_OPS_FL_RCU;
+       ops->func    = perf_ftrace_function_call;
+       ops->private = (void *)(unsigned long)nr_cpu_ids;
+
        return register_ftrace_function(ops);
 }
 
@@ -354,19 +378,11 @@ static int perf_ftrace_function_unregister(struct perf_event *event)
        return ret;
 }
 
-static void perf_ftrace_function_enable(struct perf_event *event)
-{
-       ftrace_function_local_enable(&event->ftrace_ops);
-}
-
-static void perf_ftrace_function_disable(struct perf_event *event)
-{
-       ftrace_function_local_disable(&event->ftrace_ops);
-}
-
 int perf_ftrace_event_register(struct trace_event_call *call,
                               enum trace_reg type, void *data)
 {
+       struct perf_event *event = data;
+
        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
@@ -379,11 +395,11 @@ int perf_ftrace_event_register(struct trace_event_call *call,
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
-               perf_ftrace_function_enable(data);
-               return 0;
+               event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
+               return 1;
        case TRACE_REG_PERF_DEL:
-               perf_ftrace_function_disable(data);
-               return 0;
+               event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
+               return 1;
        }
 
        return -EINVAL;
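
With FTRACE_OPS_FL_PER_CPU gone, the per-event state lives in ops->private: nr_cpu_ids means "disabled everywhere" and a valid CPU number means "fire only on that CPU", which perf_ftrace_function_call() compares against smp_processor_id(). A toy model of that encoding (user-space, made-up CPU numbers):

#include <stdio.h>

#define NR_CPU_IDS 8	/* stand-in for nr_cpu_ids */

struct toy_ops {
	unsigned long private;	/* target cpu, or NR_CPU_IDS when disabled */
};

static void callback(struct toy_ops *ops, unsigned long this_cpu)
{
	/* mirrors: if ((unsigned long)ops->private != smp_processor_id()) return; */
	if (ops->private != this_cpu)
		return;
	printf("fired on cpu %lu\n", this_cpu);
}

int main(void)
{
	struct toy_ops ops = { .private = NR_CPU_IDS };	/* registered, disabled */

	callback(&ops, 3);		/* nothing: disabled everywhere */

	ops.private = 3;		/* PERF_ADD ran on cpu 3 */
	callback(&ops, 3);		/* fires */
	callback(&ops, 5);		/* wrong cpu, ignored */

	ops.private = NR_CPU_IDS;	/* PERF_DEL */
	callback(&ops, 3);		/* nothing again */
	return 0;
}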
index 87468398b9ed6206722db4126cc6c6318580e357..ec0f9aa4e1516bd7fe2ee17b6b12ff27a81d1482 100644 (file)
@@ -1406,8 +1406,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
                return -ENODEV;
 
        /* Make sure the system still exists */
-       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
                        if (dir == inode->i_private) {
@@ -1421,8 +1421,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
                }
        }
  exit_loop:
-       mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        if (!system)
                return -ENODEV;
@@ -2294,15 +2294,15 @@ static void __add_event_to_tracers(struct trace_event_call *call);
 int trace_add_event_call(struct trace_event_call *call)
 {
        int ret;
-       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
 
        ret = __register_event(call, NULL);
        if (ret >= 0)
                __add_event_to_tracers(call);
 
-       mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
        return ret;
 }
 
@@ -2356,13 +2356,13 @@ int trace_remove_event_call(struct trace_event_call *call)
 {
        int ret;
 
-       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
        down_write(&trace_event_sem);
        ret = probe_remove_event_call(call);
        up_write(&trace_event_sem);
-       mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return ret;
 }
@@ -2424,8 +2424,8 @@ static int trace_module_notify(struct notifier_block *self,
 {
        struct module *mod = data;
 
-       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
@@ -2434,8 +2434,8 @@ static int trace_module_notify(struct notifier_block *self,
                trace_module_remove_events(mod);
                break;
        }
-       mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return 0;
 }
@@ -2950,24 +2950,24 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
  * creates the event hierarchy in the @parent/events directory.
  *
  * Returns 0 on success.
+ *
+ * Must be called with event_mutex held.
  */
 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
 {
        int ret;
 
-       mutex_lock(&event_mutex);
+       lockdep_assert_held(&event_mutex);
 
        ret = create_event_toplevel_files(parent, tr);
        if (ret)
-               goto out_unlock;
+               goto out;
 
        down_write(&trace_event_sem);
        __trace_add_event_dirs(tr);
        up_write(&trace_event_sem);
 
- out_unlock:
-       mutex_unlock(&event_mutex);
-
+ out:
        return ret;
 }
 
@@ -2996,9 +2996,10 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
        return ret;
 }
 
+/* Must be called with event_mutex held */
 int event_trace_del_tracer(struct trace_array *tr)
 {
-       mutex_lock(&event_mutex);
+       lockdep_assert_held(&event_mutex);
 
        /* Disable any event triggers and associated soft-disabled events */
        clear_event_triggers(tr);
@@ -3019,8 +3020,6 @@ int event_trace_del_tracer(struct trace_array *tr)
 
        tr->event_dir = NULL;
 
-       mutex_unlock(&event_mutex);
-
        return 0;
 }
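
Taken together with the instance_mkdir()/instance_rmdir() hunks earlier, these changes fix one ordering: event_mutex is always taken before trace_types_lock, and event_trace_add_tracer()/event_trace_del_tracer() now expect their caller to hold event_mutex (hence the lockdep_assert_held()). A minimal illustration of why a single fixed order prevents ABBA deadlocks, using pthread mutexes as stand-ins:

#include <pthread.h>
#include <stdio.h>

/* stand-ins for event_mutex and trace_types_lock */
static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t trace_types_lock = PTHREAD_MUTEX_INITIALIZER;

/* every path that needs both locks takes them in this one order */
static void locked_op(const char *what)
{
	pthread_mutex_lock(&event_mutex);
	pthread_mutex_lock(&trace_types_lock);
	printf("%s under event_mutex -> trace_types_lock\n", what);
	pthread_mutex_unlock(&trace_types_lock);
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	/*
	 * As long as no path takes trace_types_lock first, two such
	 * paths can never hold each other's first lock (no ABBA).
	 */
	locked_op("instance_mkdir");
	locked_op("trace_add_event_call");
	return 0;
}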
 
index 1c21d0e2a145a6e180116fc1fd0705ae7e3fdb17..1e1558c99d56090eea6a321c11d7790334fb0e24 100644 (file)
@@ -28,12 +28,16 @@ struct hist_field;
 
 typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
 
+#define HIST_FIELD_OPERANDS_MAX        2
+
 struct hist_field {
        struct ftrace_event_field       *field;
        unsigned long                   flags;
        hist_field_fn_t                 fn;
        unsigned int                    size;
        unsigned int                    offset;
+       unsigned int                    is_signed;
+       struct hist_field               *operands[HIST_FIELD_OPERANDS_MAX];
 };
 
 static u64 hist_field_none(struct hist_field *field, void *event)
@@ -71,7 +75,9 @@ static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
 
 static u64 hist_field_log2(struct hist_field *hist_field, void *event)
 {
-       u64 val = *(u64 *)(event + hist_field->field->offset);
+       struct hist_field *operand = hist_field->operands[0];
+
+       u64 val = operand->fn(operand, event);
 
        return (u64) ilog2(roundup_pow_of_two(val));
 }
@@ -110,16 +116,16 @@ DEFINE_HIST_FIELD_FN(u8);
 #define HIST_KEY_SIZE_MAX      (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
 
 enum hist_field_flags {
-       HIST_FIELD_FL_HITCOUNT          = 1,
-       HIST_FIELD_FL_KEY               = 2,
-       HIST_FIELD_FL_STRING            = 4,
-       HIST_FIELD_FL_HEX               = 8,
-       HIST_FIELD_FL_SYM               = 16,
-       HIST_FIELD_FL_SYM_OFFSET        = 32,
-       HIST_FIELD_FL_EXECNAME          = 64,
-       HIST_FIELD_FL_SYSCALL           = 128,
-       HIST_FIELD_FL_STACKTRACE        = 256,
-       HIST_FIELD_FL_LOG2              = 512,
+       HIST_FIELD_FL_HITCOUNT          = 1 << 0,
+       HIST_FIELD_FL_KEY               = 1 << 1,
+       HIST_FIELD_FL_STRING            = 1 << 2,
+       HIST_FIELD_FL_HEX               = 1 << 3,
+       HIST_FIELD_FL_SYM               = 1 << 4,
+       HIST_FIELD_FL_SYM_OFFSET        = 1 << 5,
+       HIST_FIELD_FL_EXECNAME          = 1 << 6,
+       HIST_FIELD_FL_SYSCALL           = 1 << 7,
+       HIST_FIELD_FL_STACKTRACE        = 1 << 8,
+       HIST_FIELD_FL_LOG2              = 1 << 9,
 };
 
 struct hist_trigger_attrs {
@@ -146,6 +152,25 @@ struct hist_trigger_data {
        struct tracing_map              *map;
 };
 
+static const char *hist_field_name(struct hist_field *field,
+                                  unsigned int level)
+{
+       const char *field_name = "";
+
+       if (level > 1)
+               return field_name;
+
+       if (field->field)
+               field_name = field->field->name;
+       else if (field->flags & HIST_FIELD_FL_LOG2)
+               field_name = hist_field_name(field->operands[0], ++level);
+
+       if (field_name == NULL)
+               field_name = "";
+
+       return field_name;
+}
+
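
Because a .log2 key no longer points at a struct ftrace_event_field directly, its display name is resolved by recursing into operands[0], with the level argument bounding the depth. A small stand-alone sketch of that resolution (toy_field and FL_LOG2 are invented stand-ins for the tracing structures):

#include <stdio.h>

#define FL_LOG2 (1 << 0)

struct toy_field {
	const char *name;		/* set for plain event fields */
	unsigned long flags;
	struct toy_field *operands[2];	/* set for derived fields like .log2 */
};

static const char *field_name(struct toy_field *f, unsigned int level)
{
	const char *name = "";

	if (level > 1)			/* derived fields nest one level deep */
		return name;

	if (f->name)
		name = f->name;
	else if (f->flags & FL_LOG2)
		name = field_name(f->operands[0], level + 1);

	return name;
}

int main(void)
{
	struct toy_field bytes = { .name = "bytes_req" };
	struct toy_field log2 = { .flags = FL_LOG2, .operands = { &bytes } };

	printf("%s\n", field_name(&bytes, 0));	/* bytes_req */
	printf("%s\n", field_name(&log2, 0));	/* bytes_req, via the operand */
	return 0;
}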
 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
 {
        hist_field_fn_t fn = NULL;
@@ -340,8 +365,20 @@ static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
        .elt_init       = hist_trigger_elt_comm_init,
 };
 
-static void destroy_hist_field(struct hist_field *hist_field)
+static void destroy_hist_field(struct hist_field *hist_field,
+                              unsigned int level)
 {
+       unsigned int i;
+
+       if (level > 2)
+               return;
+
+       if (!hist_field)
+               return;
+
+       for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
+               destroy_hist_field(hist_field->operands[i], level + 1);
+
        kfree(hist_field);
 }
 
@@ -368,7 +405,10 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field,
        }
 
        if (flags & HIST_FIELD_FL_LOG2) {
+               unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
                hist_field->fn = hist_field_log2;
+               hist_field->operands[0] = create_hist_field(field, fl);
+               hist_field->size = hist_field->operands[0]->size;
                goto out;
        }
 
@@ -388,7 +428,7 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field,
                hist_field->fn = select_value_fn(field->size,
                                                 field->is_signed);
                if (!hist_field->fn) {
-                       destroy_hist_field(hist_field);
+                       destroy_hist_field(hist_field, 0);
                        return NULL;
                }
        }
@@ -405,7 +445,7 @@ static void destroy_hist_fields(struct hist_trigger_data *hist_data)
 
        for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
                if (hist_data->fields[i]) {
-                       destroy_hist_field(hist_data->fields[i]);
+                       destroy_hist_field(hist_data->fields[i], 0);
                        hist_data->fields[i] = NULL;
                }
        }
@@ -450,7 +490,7 @@ static int create_val_field(struct hist_trigger_data *hist_data,
        }
 
        field = trace_find_event_field(file->event_call, field_name);
-       if (!field) {
+       if (!field || !field->size) {
                ret = -EINVAL;
                goto out;
        }
@@ -548,7 +588,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
                }
 
                field = trace_find_event_field(file->event_call, field_name);
-               if (!field) {
+               if (!field || !field->size) {
                        ret = -EINVAL;
                        goto out;
                }
@@ -653,7 +693,6 @@ static int is_descending(const char *str)
 static int create_sort_keys(struct hist_trigger_data *hist_data)
 {
        char *fields_str = hist_data->attrs->sort_key_str;
-       struct ftrace_event_field *field = NULL;
        struct tracing_map_sort_key *sort_key;
        int descending, ret = 0;
        unsigned int i, j;
@@ -670,7 +709,9 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
        }
 
        for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
+               struct hist_field *hist_field;
                char *field_str, *field_name;
+               const char *test_name;
 
                sort_key = &hist_data->sort_keys[i];
 
@@ -703,8 +744,10 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
                }
 
                for (j = 1; j < hist_data->n_fields; j++) {
-                       field = hist_data->fields[j]->field;
-                       if (field && (strcmp(field_name, field->name) == 0)) {
+                       hist_field = hist_data->fields[j];
+                       test_name = hist_field_name(hist_field, 0);
+
+                       if (strcmp(field_name, test_name) == 0) {
                                sort_key->field_idx = j;
                                descending = is_descending(field_str);
                                if (descending < 0) {
@@ -952,6 +995,7 @@ hist_trigger_entry_print(struct seq_file *m,
        struct hist_field *key_field;
        char str[KSYM_SYMBOL_LEN];
        bool multiline = false;
+       const char *field_name;
        unsigned int i;
        u64 uval;
 
@@ -963,26 +1007,27 @@ hist_trigger_entry_print(struct seq_file *m,
                if (i > hist_data->n_vals)
                        seq_puts(m, ", ");
 
+               field_name = hist_field_name(key_field, 0);
+
                if (key_field->flags & HIST_FIELD_FL_HEX) {
                        uval = *(u64 *)(key + key_field->offset);
-                       seq_printf(m, "%s: %llx",
-                                  key_field->field->name, uval);
+                       seq_printf(m, "%s: %llx", field_name, uval);
                } else if (key_field->flags & HIST_FIELD_FL_SYM) {
                        uval = *(u64 *)(key + key_field->offset);
                        sprint_symbol_no_offset(str, uval);
-                       seq_printf(m, "%s: [%llx] %-45s",
-                                  key_field->field->name, uval, str);
+                       seq_printf(m, "%s: [%llx] %-45s", field_name,
+                                  uval, str);
                } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
                        uval = *(u64 *)(key + key_field->offset);
                        sprint_symbol(str, uval);
-                       seq_printf(m, "%s: [%llx] %-55s",
-                                  key_field->field->name, uval, str);
+                       seq_printf(m, "%s: [%llx] %-55s", field_name,
+                                  uval, str);
                } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
                        char *comm = elt->private_data;
 
                        uval = *(u64 *)(key + key_field->offset);
-                       seq_printf(m, "%s: %-16s[%10llu]",
-                                  key_field->field->name, comm, uval);
+                       seq_printf(m, "%s: %-16s[%10llu]", field_name,
+                                  comm, uval);
                } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
                        const char *syscall_name;
 
@@ -991,8 +1036,8 @@ hist_trigger_entry_print(struct seq_file *m,
                        if (!syscall_name)
                                syscall_name = "unknown_syscall";
 
-                       seq_printf(m, "%s: %-30s[%3llu]",
-                                  key_field->field->name, syscall_name, uval);
+                       seq_printf(m, "%s: %-30s[%3llu]", field_name,
+                                  syscall_name, uval);
                } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
                        seq_puts(m, "stacktrace:\n");
                        hist_trigger_stacktrace_print(m,
@@ -1000,15 +1045,14 @@ hist_trigger_entry_print(struct seq_file *m,
                                                      HIST_STACKTRACE_DEPTH);
                        multiline = true;
                } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
-                       seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
+                       seq_printf(m, "%s: ~ 2^%-2llu", field_name,
                                   *(u64 *)(key + key_field->offset));
                } else if (key_field->flags & HIST_FIELD_FL_STRING) {
-                       seq_printf(m, "%s: %-50s", key_field->field->name,
+                       seq_printf(m, "%s: %-50s", field_name,
                                   (char *)(key + key_field->offset));
                } else {
                        uval = *(u64 *)(key + key_field->offset);
-                       seq_printf(m, "%s: %10llu", key_field->field->name,
-                                  uval);
+                       seq_printf(m, "%s: %10llu", field_name, uval);
                }
        }
 
@@ -1021,13 +1065,13 @@ hist_trigger_entry_print(struct seq_file *m,
                   tracing_map_read_sum(elt, HITCOUNT_IDX));
 
        for (i = 1; i < hist_data->n_vals; i++) {
+               field_name = hist_field_name(hist_data->fields[i], 0);
+
                if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
-                       seq_printf(m, "  %s: %10llx",
-                                  hist_data->fields[i]->field->name,
+                       seq_printf(m, "  %s: %10llx", field_name,
                                   tracing_map_read_sum(elt, i));
                } else {
-                       seq_printf(m, "  %s: %10llu",
-                                  hist_data->fields[i]->field->name,
+                       seq_printf(m, "  %s: %10llu", field_name,
                                   tracing_map_read_sum(elt, i));
                }
        }
@@ -1062,7 +1106,7 @@ static void hist_trigger_show(struct seq_file *m,
                              struct event_trigger_data *data, int n)
 {
        struct hist_trigger_data *hist_data;
-       int n_entries, ret = 0;
+       int n_entries;
 
        if (n > 0)
                seq_puts(m, "\n\n");
@@ -1073,10 +1117,8 @@ static void hist_trigger_show(struct seq_file *m,
 
        hist_data = data->private_data;
        n_entries = print_entries(m, hist_data);
-       if (n_entries < 0) {
-               ret = n_entries;
+       if (n_entries < 0)
                n_entries = 0;
-       }
 
        seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
                   (u64)atomic64_read(&hist_data->map->hits),
@@ -1142,7 +1184,9 @@ static const char *get_hist_field_flags(struct hist_field *hist_field)
 
 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
 {
-       seq_printf(m, "%s", hist_field->field->name);
+       const char *field_name = hist_field_name(hist_field, 0);
+
+       seq_printf(m, "%s", field_name);
        if (hist_field->flags) {
                const char *flags_str = get_hist_field_flags(hist_field);
 
index 7758bc0617cb15d8731defbc67912b5eb46246c2..03ecb4465ee4587290e0474143f425f892771140 100644 (file)
 
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
+#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array              *irqsoff_trace __read_mostly;
 static int                             tracer_enabled __read_mostly;
 
@@ -462,64 +466,44 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
 
 #else /* !CONFIG_PROVE_LOCKING */
 
-/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
-
 /*
  * We are only interested in hardirq on/off events:
  */
-void trace_hardirqs_on(void)
+static inline void tracer_hardirqs_on(void)
 {
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL(trace_hardirqs_on);
 
-void trace_hardirqs_off(void)
+static inline void tracer_hardirqs_off(void)
 {
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL(trace_hardirqs_off);
 
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
 {
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
 {
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
 }
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 #endif /* CONFIG_PROVE_LOCKING */
 #endif /*  CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
-void trace_preempt_on(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
 }
 
-void trace_preempt_off(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
@@ -781,3 +765,100 @@ __init static int init_irqsoff_tracer(void)
        return 0;
 }
 core_initcall(init_irqsoff_tracer);
+#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */
+
+#ifndef CONFIG_IRQSOFF_TRACER
+static inline void tracer_hardirqs_on(void) { }
+static inline void tracer_hardirqs_off(void) { }
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#endif
+
+#ifndef CONFIG_PREEMPT_TRACER
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
+void trace_hardirqs_on(void)
+{
+       if (!this_cpu_read(tracing_irq_cpu))
+               return;
+
+       trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+       tracer_hardirqs_on();
+
+       this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+       if (this_cpu_read(tracing_irq_cpu))
+               return;
+
+       this_cpu_write(tracing_irq_cpu, 1);
+
+       trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+       tracer_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+       if (!this_cpu_read(tracing_irq_cpu))
+               return;
+
+       trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+       tracer_hardirqs_on_caller(caller_addr);
+
+       this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+       if (this_cpu_read(tracing_irq_cpu))
+               return;
+
+       this_cpu_write(tracing_irq_cpu, 1);
+
+       trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+       tracer_hardirqs_off_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+/*
+ * Stubs:
+ */
+
+void trace_softirqs_on(unsigned long ip)
+{
+}
+
+void trace_softirqs_off(unsigned long ip)
+{
+}
+
+inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+#if defined(CONFIG_PREEMPT_TRACER) || \
+       (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+       trace_preempt_enable_rcuidle(a0, a1);
+       tracer_preempt_on(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+       trace_preempt_disable_rcuidle(a0, a1);
+       tracer_preempt_off(a0, a1);
+}
+#endif
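
trace_hardirqs_on()/off() above use a per-cpu flag so the irq_disable/irq_enable tracepoints and the latency tracer hooks fire only on the first disable and the matching final enable, not on every redundant call while interrupts are already off. A single-CPU user-space model of that toggle (the per-cpu variable collapses to one int here):

#include <stdio.h>

static int tracing_irq_cpu;	/* per-cpu in the kernel; one CPU modeled here */

static void hardirqs_off(void)
{
	if (tracing_irq_cpu)
		return;			/* already accounted as off */
	tracing_irq_cpu = 1;
	printf("emit irq_disable event\n");
}

static void hardirqs_on(void)
{
	if (!tracing_irq_cpu)
		return;			/* never saw the matching disable */
	printf("emit irq_enable event\n");
	tracing_irq_cpu = 0;
}

int main(void)
{
	hardirqs_off();		/* emits */
	hardirqs_off();		/* redundant, silent */
	hardirqs_on();		/* emits */
	hardirqs_on();		/* redundant, silent */
	return 0;
}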
index abf92e478cfb59e8b82c8851c7238e4f75669f2c..492700c5fb4d27979d119b13028024a06f27b400 100644 (file)
@@ -907,8 +907,8 @@ static int probes_open(struct inode *inode, struct file *file)
 static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
 {
-       return traceprobe_probes_write(file, buffer, count, ppos,
-                       create_trace_kprobe);
+       return trace_parse_run_command(file, buffer, count, ppos,
+                                      create_trace_kprobe);
 }
 
 static const struct file_operations kprobe_events_ops = {
@@ -1199,7 +1199,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                             head, NULL, NULL);
+                             head, NULL);
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
 
@@ -1234,7 +1234,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                             head, NULL, NULL);
+                             head, NULL);
 }
 NOKPROBE_SYMBOL(kretprobe_perf_func);
 #endif /* CONFIG_PERF_EVENTS */
@@ -1431,9 +1431,9 @@ static __init int kprobe_trace_self_tests_init(void)
 
        pr_info("Testing kprobe tracing: ");
 
-       ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
-                                 "$stack $stack0 +0($stack)",
-                                 create_trace_kprobe);
+       ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
+                               "$stack $stack0 +0($stack)",
+                               create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
@@ -1453,8 +1453,8 @@ static __init int kprobe_trace_self_tests_init(void)
                }
        }
 
-       ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
-                                 "$retval", create_trace_kprobe);
+       ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
+                               "$retval", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
@@ -1524,13 +1524,13 @@ static __init int kprobe_trace_self_tests_init(void)
                        disable_trace_kprobe(tk, file);
        }
 
-       ret = traceprobe_command("-:testprobe", create_trace_kprobe);
+       ret = trace_run_command("-:testprobe", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
-       ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
+       ret = trace_run_command("-:testprobe2", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
index 52478f033f88f2d38315c887406608554c391a32..d5935730867709232f547fd08ef6a3659ea819c9 100644 (file)
@@ -623,92 +623,6 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
        kfree(arg->comm);
 }
 
-int traceprobe_command(const char *buf, int (*createfn)(int, char **))
-{
-       char **argv;
-       int argc, ret;
-
-       argc = 0;
-       ret = 0;
-       argv = argv_split(GFP_KERNEL, buf, &argc);
-       if (!argv)
-               return -ENOMEM;
-
-       if (argc)
-               ret = createfn(argc, argv);
-
-       argv_free(argv);
-
-       return ret;
-}
-
-#define WRITE_BUFSIZE  4096
-
-ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
-                               size_t count, loff_t *ppos,
-                               int (*createfn)(int, char **))
-{
-       char *kbuf, *buf, *tmp;
-       int ret = 0;
-       size_t done = 0;
-       size_t size;
-
-       kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
-       if (!kbuf)
-               return -ENOMEM;
-
-       while (done < count) {
-               size = count - done;
-
-               if (size >= WRITE_BUFSIZE)
-                       size = WRITE_BUFSIZE - 1;
-
-               if (copy_from_user(kbuf, buffer + done, size)) {
-                       ret = -EFAULT;
-                       goto out;
-               }
-               kbuf[size] = '\0';
-               buf = kbuf;
-               do {
-                       tmp = strchr(buf, '\n');
-                       if (tmp) {
-                               *tmp = '\0';
-                               size = tmp - buf + 1;
-                       } else {
-                               size = strlen(buf);
-                               if (done + size < count) {
-                                       if (buf != kbuf)
-                                               break;
-                                       /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
-                                       pr_warn("Line length is too long: Should be less than %d\n",
-                                               WRITE_BUFSIZE - 2);
-                                       ret = -EINVAL;
-                                       goto out;
-                               }
-                       }
-                       done += size;
-
-                       /* Remove comments */
-                       tmp = strchr(buf, '#');
-
-                       if (tmp)
-                               *tmp = '\0';
-
-                       ret = traceprobe_command(buf, createfn);
-                       if (ret)
-                               goto out;
-                       buf += size;
-
-               } while (done < count);
-       }
-       ret = done;
-
-out:
-       kfree(kbuf);
-
-       return ret;
-}
-
 static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
                           bool is_return)
 {
index 903273c93e6167afcbe2de99451a906c2e79ab1f..fb66e3eaa192a24d924ec743dd2814283f8298c8 100644 (file)
@@ -42,7 +42,6 @@
 
 #define MAX_TRACE_ARGS         128
 #define MAX_ARGSTR_LEN         63
-#define MAX_EVENT_NAME_LEN     64
 #define MAX_STRING_SIZE                PATH_MAX
 
 /* Reserved field names */
@@ -356,12 +355,6 @@ extern void traceprobe_free_probe_arg(struct probe_arg *arg);
 
 extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
 
-extern ssize_t traceprobe_probes_write(struct file *file,
-               const char __user *buffer, size_t count, loff_t *ppos,
-               int (*createfn)(int, char**));
-
-extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
-
 /* Sum up total data length for dynamic arrays (strings) */
 static nokprobe_inline int
 __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
index cd70eb5df38ecce05eba1bc7a485b6cb59e674f1..11e9daa4a568a22c38669269a89769b0b3bae9f3 100644 (file)
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  * Test the trace buffer to see if all the elements
  * are still sane.
  */
-static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
+static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 {
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;
@@ -1151,38 +1151,6 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 }
 #endif /* CONFIG_SCHED_TRACER */
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-int
-trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
-{
-       unsigned long count;
-       int ret;
-
-       /* start the tracing */
-       ret = tracer_init(trace, tr);
-       if (ret) {
-               warn_failed_init_tracer(trace, ret);
-               return ret;
-       }
-
-       /* Sleep for a 1/10 of a second */
-       msleep(100);
-       /* stop the tracing. */
-       tracing_stop();
-       /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
-       trace->reset(tr);
-       tracing_start();
-
-       if (!ret && !count) {
-               printk(KERN_CONT ".. no entries found ..");
-               ret = -1;
-       }
-
-       return ret;
-}
-#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
-
 #ifdef CONFIG_BRANCH_TRACER
 int
 trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
index 19bcaaac884be8ae033184d3ed2ff0a53d35a9f2..f93a56d2db275be64df083344b68ec65f3c32473 100644 (file)
@@ -625,7 +625,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 
        perf_trace_buf_submit(rec, size, rctx,
                              sys_data->enter_event->event.type, 1, regs,
-                             head, NULL, NULL);
+                             head, NULL);
 }
 
 static int perf_sysenter_enable(struct trace_event_call *call)
@@ -721,7 +721,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        }
 
        perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
-                             1, regs, head, NULL, NULL);
+                             1, regs, head, NULL);
 }
 
 static int perf_sysexit_enable(struct trace_event_call *call)
index 153c0e4114611fde7e7212c7a9bf40b8e42578e5..40592e7b3568bcfd41220f579fc05066e3675b33 100644 (file)
@@ -651,7 +651,7 @@ static int probes_open(struct inode *inode, struct file *file)
 static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
 {
-       return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
+       return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
 }
 
 static const struct file_operations uprobe_events_ops = {
@@ -1155,7 +1155,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
        }
 
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                             head, NULL, NULL);
+                             head, NULL);
  out:
        preempt_enable();
 }
index 305039b122fafba242f73b5982289ce4a12a6e20..07e75344725ba254f5c42a314659142f0fd48528 100644 (file)
@@ -428,7 +428,8 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
 
                if (test_key && test_key == key_hash && entry->val &&
                    keys_match(key, entry->val->key, map->key_size)) {
-                       atomic64_inc(&map->hits);
+                       if (!lookup_only)
+                               atomic64_inc(&map->hits);
                        return entry->val;
                }
 
index ab0ca77331d0429fbab0e38b8e3525bbaa734bce..5b5bbf8ae550dfe2c7d6b08496d1a8327b0d6a10 100644 (file)
@@ -6,7 +6,7 @@
 #define TRACING_MAP_BITS_MAX           17
 #define TRACING_MAP_BITS_MIN           7
 
-#define TRACING_MAP_KEYS_MAX           2
+#define TRACING_MAP_KEYS_MAX           3
 #define TRACING_MAP_VALS_MAX           3
 #define TRACING_MAP_FIELDS_MAX         (TRACING_MAP_KEYS_MAX + \
                                         TRACING_MAP_VALS_MAX)
index 6ff9905250ff0563713c64585a74e888de70f343..18e5fa4b0e71913087585429e561d9eb4e30b73d 100644 (file)
@@ -537,14 +537,14 @@ static int proc_cap_handler(struct ctl_table *table, int write,
        /*
         * Drop everything not in the new_cap (but don't add things)
         */
-       spin_lock(&umh_sysctl_lock);
        if (write) {
+               spin_lock(&umh_sysctl_lock);
                if (table->data == CAP_BSET)
                        usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
                if (table->data == CAP_PI)
                        usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
+               spin_unlock(&umh_sysctl_lock);
        }
-       spin_unlock(&umh_sysctl_lock);
 
        return 0;
 }
index dde6298f6b221e136fc579a37adc80310f97a8f6..8fdb710bfdd732fc3e6bfaa3110264de70c9af1d 100644 (file)
@@ -1509,7 +1509,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        struct work_struct *work = &dwork->work;
 
        WARN_ON_ONCE(!wq);
-       WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)delayed_work_timer_fn);
+       WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
        WARN_ON_ONCE(timer_pending(timer));
        WARN_ON_ONCE(!list_empty(&work->entry));
 
index a2b6745324ab358a5685ccb6ce690a4f64a8b809..c5e84fbcb30b7562656dcbbc050bcc0ec4cb7a90 100644 (file)
@@ -46,10 +46,6 @@ config GENERIC_IOMAP
        bool
        select GENERIC_PCI_IOMAP
 
-config GENERIC_IO
-       bool
-       default n
-
 config STMP_DEVICE
        bool
 
@@ -584,7 +580,7 @@ config PRIME_NUMBERS
        tristate
 
 config STRING_SELFTEST
-       bool "Test string functions"
+       tristate "Test string functions"
 
 endmenu
 
index 8ffd891857aba74eb4c309c692e6ae7034bd4a7b..947d3e2ed5c2f1a7fa1e001e4bf56b9c7a2b9d49 100644 (file)
@@ -756,6 +756,16 @@ config KCOV
 
          For more details, see Documentation/dev-tools/kcov.rst.
 
+config KCOV_ENABLE_COMPARISONS
+       bool "Enable comparison operands collection by KCOV"
+       depends on KCOV
+       default n
+       help
+         KCOV also exposes operands of every comparison in the instrumented
+         code along with operand sizes and PCs of the comparison instructions.
+         These operands can be used by fuzzing engines to improve the quality
+         of fuzzing coverage.
+
 config KCOV_INSTRUMENT_ALL
        bool "Instrument all code by default"
        depends on KCOV
@@ -1850,6 +1860,15 @@ config TEST_BPF
 
          If unsure, say N.
 
+config TEST_FIND_BIT
+       tristate "Test find_bit functions"
+       default n
+       help
+         This builds the "test_find_bit" module that measures the
+         performance of the find_*_bit() functions.
+
+         If unsure, say N.
+
 config TEST_FIRMWARE
        tristate "Test firmware loading via userspace interface"
        default n
index 136a0b2545641dc0f6e4635fd7124a41081fa5a9..d11c48ec8ffdbd32db32cf9258c8bb45e92e440b 100644 (file)
@@ -40,12 +40,14 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
         bsearch.o find_bit.o llist.o memweight.o kfifo.o \
         percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
         once.o refcount.o usercopy.o errseq.o
+obj-$(CONFIG_STRING_SELFTEST) += test_string.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
 obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
+obj-$(CONFIG_TEST_FIND_BIT) += test_find_bit.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
 obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
 obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
index 1e094408c893db33f42542960406e93eb19a1d87..c1b0fad31b109157427d1ec1e30e7e93e303465d 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -186,7 +186,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
                return BUG_TRAP_TYPE_WARN;
        }
 
-       printk(KERN_DEFAULT "------------[ cut here ]------------\n");
+       printk(KERN_DEFAULT CUT_HERE);
 
        if (file)
                pr_crit("kernel BUG at %s:%u!\n", file, line);
@@ -196,3 +196,26 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 
        return BUG_TRAP_TYPE_BUG;
 }
+
+static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
+{
+       struct bug_entry *bug;
+
+       for (bug = start; bug < end; bug++)
+               bug->flags &= ~BUGFLAG_DONE;
+}
+
+void generic_bug_clear_once(void)
+{
+#ifdef CONFIG_MODULES
+       struct module *mod;
+
+       rcu_read_lock_sched();
+       list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
+               clear_once_table(mod->bug_table,
+                                mod->bug_table + mod->num_bugs);
+       rcu_read_unlock_sched();
+#endif
+
+       clear_once_table(__start___bug_table, __stop___bug_table);
+}
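
The new generic_bug_clear_once() walks the built-in and per-module bug tables and drops BUGFLAG_DONE, so once-only warnings routed through the bug table can trigger again. A minimal sketch of a debugfs trigger built on it, assuming the helper is declared in linux/bug.h (the knob this merge actually ships is described in Documentation/clearing-warn-once.txt; everything below except generic_bug_clear_once() is hypothetical):

    #include <linux/bug.h>
    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/init.h>

    static int clear_warn_once_set(void *data, u64 val)
    {
            generic_bug_clear_once();  /* clear BUGFLAG_DONE in every bug-table entry */
            return 0;
    }
    DEFINE_SIMPLE_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set, "%lld\n");

    static int __init clear_warn_once_init(void)
    {
            /* writing any value to <debugfs>/clear_warn_once rearms the warnings */
            debugfs_create_file("clear_warn_once", 0200, NULL, NULL,
                                &clear_warn_once_fops);
            return 0;
    }
    late_initcall(clear_warn_once_init);
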
index ea4cc3dde4f1bac9f3b8337fdac95408cb3e8b35..1b34d210452c5aba703aea04e648dd6ed56fd576 100644 (file)
@@ -1495,14 +1495,22 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
        if (!entry)
                return;
 
+       /* handle vmalloc and linear addresses */
+       if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+               return;
+
        entry->type      = dma_debug_coherent;
        entry->dev       = dev;
-       entry->pfn       = page_to_pfn(virt_to_page(virt));
        entry->offset    = offset_in_page(virt);
        entry->size      = size;
        entry->dev_addr  = dma_addr;
        entry->direction = DMA_BIDIRECTIONAL;
 
+       if (is_vmalloc_addr(virt))
+               entry->pfn = vmalloc_to_pfn(virt);
+       else
+               entry->pfn = page_to_pfn(virt_to_page(virt));
+
        add_dma_entry(entry);
 }
 EXPORT_SYMBOL(debug_dma_alloc_coherent);
@@ -1513,13 +1521,21 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
        struct dma_debug_entry ref = {
                .type           = dma_debug_coherent,
                .dev            = dev,
-               .pfn            = page_to_pfn(virt_to_page(virt)),
                .offset         = offset_in_page(virt),
                .dev_addr       = addr,
                .size           = size,
                .direction      = DMA_BIDIRECTIONAL,
        };
 
+       /* handle vmalloc and linear addresses */
+       if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+               return;
+
+       if (is_vmalloc_addr(virt))
+               ref.pfn = vmalloc_to_pfn(virt);
+       else
+               ref.pfn = page_to_pfn(virt_to_page(virt));
+
        if (unlikely(dma_debug_disabled()))
                return;
 
index da796e2dc4f506dc63db68b132180acff00c291f..c7c96bc7654af7c4d5f3c8bbf595989f9b3d0ebf 100644 (file)
@@ -360,6 +360,10 @@ static int ddebug_parse_query(char *words[], int nwords,
                                if (parse_lineno(last, &query->last_lineno) < 0)
                                        return -EINVAL;
 
+                               /* special case for last lineno not specified */
+                               if (query->last_lineno == 0)
+                                       query->last_lineno = UINT_MAX;
+
                                if (query->last_lineno < query->first_lineno) {
                                        pr_err("last-line:%d < 1st-line:%d\n",
                                                query->last_lineno,
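
With the special case added above, a line range whose upper bound is omitted now means "to the end of the file" (last_lineno is forced to UINT_MAX) instead of tripping the last-line < first-line check below it. For example, assuming a source file foo.c, a query such as

        file foo.c line 100- +p

written to <debugfs>/dynamic_debug/control enables every pr_debug() from line 100 onward.
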
index 144fe6b1a03ea536893f6e35d153a895a238b67d..ca06adc4f44510d816ddc9911a1ff38f04cd497b 100644 (file)
@@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
-       atomic_set(&chunk->avail, size);
+       atomic_long_set(&chunk->avail, size);
 
        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
@@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-               if (size > atomic_read(&chunk->avail))
+               if (size > atomic_long_read(&chunk->avail))
                        continue;
 
                start_bit = 0;
@@ -324,7 +324,7 @@ retry:
 
                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
-               atomic_sub(size, &chunk->avail);
+               atomic_long_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
@@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
-                       atomic_add(size, &chunk->avail);
+                       atomic_long_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
@@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
 
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
-               avail += atomic_read(&chunk->avail);
+               avail += atomic_long_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
 }
index db0b5aa071fc14e68fc3073e1a41c0d4617e65cf..e2d329099bf7483d2f09f48ade59e834cd6b7555 100644 (file)
@@ -8,12 +8,13 @@
 
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <linux/bitops.h>
 
 /**
- * int_sqrt - rough approximation to sqrt
+ * int_sqrt - computes the integer square root
  * @x: integer of which to calculate the sqrt
  *
- * A very rough approximation to the sqrt() function.
+ * Computes: floor(sqrt(x))
  */
 unsigned long int_sqrt(unsigned long x)
 {
@@ -22,7 +23,7 @@ unsigned long int_sqrt(unsigned long x)
        if (x <= 1)
                return x;
 
-       m = 1UL << (BITS_PER_LONG - 2);
+       m = 1UL << (__fls(x) & ~1UL);
        while (m != 0) {
                b = y + m;
                y >>= 1;
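
The rewritten int_sqrt() documents what it always computed, floor(sqrt(x)), and seeds the digit-by-digit loop at the largest power of four not exceeding x rather than at 1UL << (BITS_PER_LONG - 2), so small inputs no longer pay for dozens of no-op iterations on 64-bit. Worked example with x = 1000000: __fls(x) = 19, 19 & ~1UL = 18, so m starts at 2^18 = 262144 and the loop reaches floor(sqrt(1000000)) = 1000 in 10 iterations instead of 32.
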
index 0e343fd29570194d775594fb8e5319682c8c3d5c..835242e74aaa3372957e46291c08bdc16ec59882 100644 (file)
        MODULE_PARM_DESC(name, msg);
 
 __param(int, nnodes, 100, "Number of nodes in the interval tree");
-__param(int, perf_loops, 100000, "Number of iterations modifying the tree");
+__param(int, perf_loops, 1000, "Number of iterations modifying the tree");
 
 __param(int, nsearches, 100, "Number of searches to the interval tree");
-__param(int, search_loops, 10000, "Number of iterations searching the tree");
+__param(int, search_loops, 1000, "Number of iterations searching the tree");
 __param(bool, search_all, false, "Searches will iterate all nodes in the tree");
 
 __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
index 46e4c749e4eba64da193d0e1f221fa65eee4802a..61a6b5aab07e758cf448119546597ce54bcce9f6 100644 (file)
@@ -93,8 +93,8 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
        if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                arch_spin_lock(&lock);
                if (regs && cpu_in_idle(instruction_pointer(regs))) {
-                       pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
-                               cpu, instruction_pointer(regs));
+                       pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
+                               cpu, (void *)instruction_pointer(regs));
                } else {
                        pr_warn("NMI backtrace for cpu %d\n", cpu);
                        if (regs)
index 65cc018fef40d714272fb2e7948084e4082fe499..4aaa76404d561b86609bbd1c6eb3ca4620ca81bf 100644 (file)
@@ -213,11 +213,11 @@ static int __init prandom_init(void)
 }
 core_initcall(prandom_init);
 
-static void __prandom_timer(unsigned long dontcare);
+static void __prandom_timer(struct timer_list *unused);
 
 static DEFINE_TIMER(seed_timer, __prandom_timer);
 
-static void __prandom_timer(unsigned long dontcare)
+static void __prandom_timer(struct timer_list *unused)
 {
        u32 entropy;
        unsigned long expires;
index 191a238e5a9d73e67ee763371cf3f9a4a3865792..7d36c1e27ff6550de3f921bd865029b2b56487f7 100644 (file)
@@ -11,7 +11,7 @@
        MODULE_PARM_DESC(name, msg);
 
 __param(int, nnodes, 100, "Number of nodes in the rb-tree");
-__param(int, perf_loops, 100000, "Number of iterations modifying the rb-tree");
+__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
 __param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
 
 struct test_node {
index 5e8d410a93df5eb569e15815782be2027ec69d6e..64a9e33f1daae4b2bb02ba7dbdc0bf431756935f 100644 (file)
@@ -1052,144 +1052,3 @@ void fortify_panic(const char *name)
        BUG();
 }
 EXPORT_SYMBOL(fortify_panic);
-
-#ifdef CONFIG_STRING_SELFTEST
-#include <linux/slab.h>
-#include <linux/module.h>
-
-static __init int memset16_selftest(void)
-{
-       unsigned i, j, k;
-       u16 v, *p;
-
-       p = kmalloc(256 * 2 * 2, GFP_KERNEL);
-       if (!p)
-               return -1;
-
-       for (i = 0; i < 256; i++) {
-               for (j = 0; j < 256; j++) {
-                       memset(p, 0xa1, 256 * 2 * sizeof(v));
-                       memset16(p + i, 0xb1b2, j);
-                       for (k = 0; k < 512; k++) {
-                               v = p[k];
-                               if (k < i) {
-                                       if (v != 0xa1a1)
-                                               goto fail;
-                               } else if (k < i + j) {
-                                       if (v != 0xb1b2)
-                                               goto fail;
-                               } else {
-                                       if (v != 0xa1a1)
-                                               goto fail;
-                               }
-                       }
-               }
-       }
-
-fail:
-       kfree(p);
-       if (i < 256)
-               return (i << 24) | (j << 16) | k;
-       return 0;
-}
-
-static __init int memset32_selftest(void)
-{
-       unsigned i, j, k;
-       u32 v, *p;
-
-       p = kmalloc(256 * 2 * 4, GFP_KERNEL);
-       if (!p)
-               return -1;
-
-       for (i = 0; i < 256; i++) {
-               for (j = 0; j < 256; j++) {
-                       memset(p, 0xa1, 256 * 2 * sizeof(v));
-                       memset32(p + i, 0xb1b2b3b4, j);
-                       for (k = 0; k < 512; k++) {
-                               v = p[k];
-                               if (k < i) {
-                                       if (v != 0xa1a1a1a1)
-                                               goto fail;
-                               } else if (k < i + j) {
-                                       if (v != 0xb1b2b3b4)
-                                               goto fail;
-                               } else {
-                                       if (v != 0xa1a1a1a1)
-                                               goto fail;
-                               }
-                       }
-               }
-       }
-
-fail:
-       kfree(p);
-       if (i < 256)
-               return (i << 24) | (j << 16) | k;
-       return 0;
-}
-
-static __init int memset64_selftest(void)
-{
-       unsigned i, j, k;
-       u64 v, *p;
-
-       p = kmalloc(256 * 2 * 8, GFP_KERNEL);
-       if (!p)
-               return -1;
-
-       for (i = 0; i < 256; i++) {
-               for (j = 0; j < 256; j++) {
-                       memset(p, 0xa1, 256 * 2 * sizeof(v));
-                       memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
-                       for (k = 0; k < 512; k++) {
-                               v = p[k];
-                               if (k < i) {
-                                       if (v != 0xa1a1a1a1a1a1a1a1ULL)
-                                               goto fail;
-                               } else if (k < i + j) {
-                                       if (v != 0xb1b2b3b4b5b6b7b8ULL)
-                                               goto fail;
-                               } else {
-                                       if (v != 0xa1a1a1a1a1a1a1a1ULL)
-                                               goto fail;
-                               }
-                       }
-               }
-       }
-
-fail:
-       kfree(p);
-       if (i < 256)
-               return (i << 24) | (j << 16) | k;
-       return 0;
-}
-
-static __init int string_selftest_init(void)
-{
-       int test, subtest;
-
-       test = 1;
-       subtest = memset16_selftest();
-       if (subtest)
-               goto fail;
-
-       test = 2;
-       subtest = memset32_selftest();
-       if (subtest)
-               goto fail;
-
-       test = 3;
-       subtest = memset64_selftest();
-       if (subtest)
-               goto fail;
-
-       pr_info("String selftests succeeded\n");
-       return 0;
-fail:
-       pr_crit("String selftest failure %d.%08x\n", test, subtest);
-       return 0;
-}
-
-module_init(string_selftest_init);
-#endif /* CONFIG_STRING_SELFTEST */
diff --git a/lib/test_find_bit.c b/lib/test_find_bit.c
new file mode 100644 (file)
index 0000000..f4394a3
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Test for find_*_bit functions.
+ *
+ * Copyright (c) 2017 Cavium.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * find_bit functions are widely used in the kernel, so a successful boot
+ * is a good enough test of correctness.
+ *
+ * This test is focused on performance of traversing bitmaps. Two typical
+ * scenarios are reproduced:
+ * - randomly filled bitmap with approximately equal number of set and
+ *   cleared bits;
+ * - sparse bitmap with few set bits at random positions.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+
+#define BITMAP_LEN     (4096UL * 8 * 10)
+#define SPARSE         500
+
+static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata;
+
+/*
+ * This is Schlemiel the Painter's algorithm. It should be called after
+ * all other tests for the same bitmap because it clears every bit of the bitmap.
+ */
+static int __init test_find_first_bit(void *bitmap, unsigned long len)
+{
+       unsigned long i, cnt;
+       cycles_t cycles;
+
+       cycles = get_cycles();
+       for (cnt = i = 0; i < len; cnt++) {
+               i = find_first_bit(bitmap, len);
+               __clear_bit(i, bitmap);
+       }
+       cycles = get_cycles() - cycles;
+       pr_err("find_first_bit:\t\t%llu cycles,\t%ld iterations\n",
+              (u64)cycles, cnt);
+
+       return 0;
+}
+
+static int __init test_find_next_bit(const void *bitmap, unsigned long len)
+{
+       unsigned long i, cnt;
+       cycles_t cycles;
+
+       cycles = get_cycles();
+       for (cnt = i = 0; i < BITMAP_LEN; cnt++)
+               i = find_next_bit(bitmap, BITMAP_LEN, i) + 1;
+       cycles = get_cycles() - cycles;
+       pr_err("find_next_bit:\t\t%llu cycles,\t%ld iterations\n",
+              (u64)cycles, cnt);
+
+       return 0;
+}
+
+static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len)
+{
+       unsigned long i, cnt;
+       cycles_t cycles;
+
+       cycles = get_cycles();
+       for (cnt = i = 0; i < BITMAP_LEN; cnt++)
+               i = find_next_zero_bit(bitmap, len, i) + 1;
+       cycles = get_cycles() - cycles;
+       pr_err("find_next_zero_bit:\t%llu cycles,\t%ld iterations\n",
+              (u64)cycles, cnt);
+
+       return 0;
+}
+
+static int __init test_find_last_bit(const void *bitmap, unsigned long len)
+{
+       unsigned long l, cnt = 0;
+       cycles_t cycles;
+
+       cycles = get_cycles();
+       do {
+               cnt++;
+               l = find_last_bit(bitmap, len);
+               if (l >= len)
+                       break;
+               len = l;
+       } while (len);
+       cycles = get_cycles() - cycles;
+       pr_err("find_last_bit:\t\t%llu cycles,\t%ld iterations\n",
+              (u64)cycles, cnt);
+
+       return 0;
+}
+
+static int __init find_bit_test(void)
+{
+       unsigned long nbits = BITMAP_LEN / SPARSE;
+
+       pr_err("\nStart testing find_bit() with random-filled bitmap\n");
+
+       get_random_bytes(bitmap, sizeof(bitmap));
+
+       test_find_next_bit(bitmap, BITMAP_LEN);
+       test_find_next_zero_bit(bitmap, BITMAP_LEN);
+       test_find_last_bit(bitmap, BITMAP_LEN);
+       test_find_first_bit(bitmap, BITMAP_LEN);
+
+       pr_err("\nStart testing find_bit() with sparse bitmap\n");
+
+       bitmap_zero(bitmap, BITMAP_LEN);
+
+       while (nbits--)
+               __set_bit(prandom_u32() % BITMAP_LEN, bitmap);
+
+       test_find_next_bit(bitmap, BITMAP_LEN);
+       test_find_next_zero_bit(bitmap, BITMAP_LEN);
+       test_find_last_bit(bitmap, BITMAP_LEN);
+       test_find_first_bit(bitmap, BITMAP_LEN);
+
+       return 0;
+}
+module_init(find_bit_test);
+
+static void __exit test_find_bit_cleanup(void)
+{
+}
+module_exit(test_find_bit_cleanup);
+
+MODULE_LICENSE("GPL");
index a25c9763fce19f17c723b9db3645ae93ba47dcb6..ef1a3ac1397e88e436f0fd282804988f3d9b77a1 100644 (file)
@@ -353,10 +353,9 @@ static noinline void __init memcg_accounted_kmem_cache(void)
         */
        for (i = 0; i < 5; i++) {
                p = kmem_cache_alloc(cache, GFP_KERNEL);
-               if (!p) {
-                       pr_err("Allocation failed\n");
+               if (!p)
                        goto free_cache;
-               }
+
                kmem_cache_free(cache, p);
                msleep(100);
        }
index fba78d25e82569e57845d16b36c271d0848099b0..337f408b4de605641ac925f502b95758f1fc4841 100644 (file)
@@ -783,10 +783,8 @@ static int kmod_config_sync_info(struct kmod_test_device *test_dev)
        free_test_dev_info(test_dev);
        test_dev->info = vzalloc(config->num_threads *
                                 sizeof(struct kmod_test_device_info));
-       if (!test_dev->info) {
-               dev_err(test_dev->dev, "Cannot alloc test_dev info\n");
+       if (!test_dev->info)
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -1089,10 +1087,8 @@ static struct kmod_test_device *alloc_test_dev_kmod(int idx)
        struct miscdevice *misc_dev;
 
        test_dev = vzalloc(sizeof(struct kmod_test_device));
-       if (!test_dev) {
-               pr_err("Cannot alloc test_dev\n");
+       if (!test_dev)
                goto err_out;
-       }
 
        mutex_init(&test_dev->config_mutex);
        mutex_init(&test_dev->trigger_mutex);
index 28e817387b04f1c8862c54deefd4e8309f26437e..5474f3f3e41d0715fec16a6db866a23c45ed231c 100644 (file)
@@ -76,17 +76,14 @@ static int __init list_sort_test(void)
        pr_debug("start testing list_sort()\n");
 
        elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
-       if (!elts) {
-               pr_err("error: cannot allocate memory\n");
+       if (!elts)
                return err;
-       }
 
        for (i = 0; i < TEST_LIST_LEN; i++) {
                el = kmalloc(sizeof(*el), GFP_KERNEL);
-               if (!el) {
-                       pr_err("error: cannot allocate memory\n");
+               if (!el)
                        goto exit;
-               }
+
                 /* force some equivalencies */
                el->value = prandom_u32() % (TEST_LIST_LEN / 3);
                el->serial = i;
index 563f10e6876aecf6e3932e0a2e955305f271b249..71ebfa43ad05f2bdbbd011989dc92b5efac2cdd6 100644 (file)
 #define PAD_SIZE 16
 #define FILL_CHAR '$'
 
-#define PTR1 ((void*)0x01234567)
-#define PTR2 ((void*)(long)(int)0xfedcba98)
-
-#if BITS_PER_LONG == 64
-#define PTR1_ZEROES "000000000"
-#define PTR1_SPACES "         "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fffffffffedcba98"
-#define PTR_WIDTH 16
-#else
-#define PTR1_ZEROES "0"
-#define PTR1_SPACES " "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fedcba98"
-#define PTR_WIDTH 8
-#endif
-#define PTR_WIDTH_STR stringify(PTR_WIDTH)
-
 static unsigned total_tests __initdata;
 static unsigned failed_tests __initdata;
 static char *test_buffer __initdata;
@@ -217,30 +199,79 @@ test_string(void)
        test("a  |   |   ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c");
 }
 
+#define PLAIN_BUF_SIZE 64      /* leave some space so we don't oops */
+
+#if BITS_PER_LONG == 64
+
+#define PTR_WIDTH 16
+#define PTR ((void *)0xffff0123456789ab)
+#define PTR_STR "ffff0123456789ab"
+#define ZEROS "00000000"       /* hex 32 zero bits */
+
+static int __init
+plain_format(void)
+{
+       char buf[PLAIN_BUF_SIZE];
+       int nchars;
+
+       nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+       if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+               return -1;
+
+       return 0;
+}
+
+#else
+
+#define PTR_WIDTH 8
+#define PTR ((void *)0x456789ab)
+#define PTR_STR "456789ab"
+
+static int __init
+plain_format(void)
+{
+       /* Format is implicitly tested for 32 bit machines by plain_hash() */
+       return 0;
+}
+
+#endif /* BITS_PER_LONG == 64 */
+
+static int __init
+plain_hash(void)
+{
+       char buf[PLAIN_BUF_SIZE];
+       int nchars;
+
+       nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+       if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * We can't use test() to test %p because we don't know what output to expect
+ * after an address is hashed.
+ */
 static void __init
 plain(void)
 {
-       test(PTR1_ZEROES PTR1_STR " " PTR2_STR, "%p %p", PTR1, PTR2);
-       /*
-        * The field width is overloaded for some %p extensions to
-        * pass another piece of information. For plain pointers, the
-        * behaviour is slightly odd: One cannot pass either the 0
-        * flag nor a precision to %p without gcc complaining, and if
-        * one explicitly gives a field width, the number is no longer
-        * zero-padded.
-        */
-       test("|" PTR1_STR PTR1_SPACES "  |  " PTR1_SPACES PTR1_STR "|",
-            "|%-*p|%*p|", PTR_WIDTH+2, PTR1, PTR_WIDTH+2, PTR1);
-       test("|" PTR2_STR "  |  " PTR2_STR "|",
-            "|%-*p|%*p|", PTR_WIDTH+2, PTR2, PTR_WIDTH+2, PTR2);
+       int err;
 
-       /*
-        * Unrecognized %p extensions are treated as plain %p, but the
-        * alphanumeric suffix is ignored (that is, does not occur in
-        * the output.)
-        */
-       test("|"PTR1_ZEROES PTR1_STR"|", "|%p0y|", PTR1);
-       test("|"PTR2_STR"|", "|%p0y|", PTR2);
+       err = plain_hash();
+       if (err) {
+               pr_warn("plain 'p' does not appear to be hashed\n");
+               failed_tests++;
+               return;
+       }
+
+       err = plain_format();
+       if (err) {
+               pr_warn("hashing plain 'p' has unexpected format\n");
+               failed_tests++;
+       }
 }
 
 static void __init
@@ -251,6 +282,7 @@ symbol_ptr(void)
 static void __init
 kernel_ptr(void)
 {
+       /* We can't test this without access to kptr_restrict. */
 }
 
 static void __init
diff --git a/lib/test_string.c b/lib/test_string.c
new file mode 100644 (file)
index 0000000..0fcdb82
--- /dev/null
@@ -0,0 +1,141 @@
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+static __init int memset16_selftest(void)
+{
+       unsigned i, j, k;
+       u16 v, *p;
+
+       p = kmalloc(256 * 2 * 2, GFP_KERNEL);
+       if (!p)
+               return -1;
+
+       for (i = 0; i < 256; i++) {
+               for (j = 0; j < 256; j++) {
+                       memset(p, 0xa1, 256 * 2 * sizeof(v));
+                       memset16(p + i, 0xb1b2, j);
+                       for (k = 0; k < 512; k++) {
+                               v = p[k];
+                               if (k < i) {
+                                       if (v != 0xa1a1)
+                                               goto fail;
+                               } else if (k < i + j) {
+                                       if (v != 0xb1b2)
+                                               goto fail;
+                               } else {
+                                       if (v != 0xa1a1)
+                                               goto fail;
+                               }
+                       }
+               }
+       }
+
+fail:
+       kfree(p);
+       if (i < 256)
+               return (i << 24) | (j << 16) | k;
+       return 0;
+}
+
+static __init int memset32_selftest(void)
+{
+       unsigned i, j, k;
+       u32 v, *p;
+
+       p = kmalloc(256 * 2 * 4, GFP_KERNEL);
+       if (!p)
+               return -1;
+
+       for (i = 0; i < 256; i++) {
+               for (j = 0; j < 256; j++) {
+                       memset(p, 0xa1, 256 * 2 * sizeof(v));
+                       memset32(p + i, 0xb1b2b3b4, j);
+                       for (k = 0; k < 512; k++) {
+                               v = p[k];
+                               if (k < i) {
+                                       if (v != 0xa1a1a1a1)
+                                               goto fail;
+                               } else if (k < i + j) {
+                                       if (v != 0xb1b2b3b4)
+                                               goto fail;
+                               } else {
+                                       if (v != 0xa1a1a1a1)
+                                               goto fail;
+                               }
+                       }
+               }
+       }
+
+fail:
+       kfree(p);
+       if (i < 256)
+               return (i << 24) | (j << 16) | k;
+       return 0;
+}
+
+static __init int memset64_selftest(void)
+{
+       unsigned i, j, k;
+       u64 v, *p;
+
+       p = kmalloc(256 * 2 * 8, GFP_KERNEL);
+       if (!p)
+               return -1;
+
+       for (i = 0; i < 256; i++) {
+               for (j = 0; j < 256; j++) {
+                       memset(p, 0xa1, 256 * 2 * sizeof(v));
+                       memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
+                       for (k = 0; k < 512; k++) {
+                               v = p[k];
+                               if (k < i) {
+                                       if (v != 0xa1a1a1a1a1a1a1a1ULL)
+                                               goto fail;
+                               } else if (k < i + j) {
+                                       if (v != 0xb1b2b3b4b5b6b7b8ULL)
+                                               goto fail;
+                               } else {
+                                       if (v != 0xa1a1a1a1a1a1a1a1ULL)
+                                               goto fail;
+                               }
+                       }
+               }
+       }
+
+fail:
+       kfree(p);
+       if (i < 256)
+               return (i << 24) | (j << 16) | k;
+       return 0;
+}
+
+static __init int string_selftest_init(void)
+{
+       int test, subtest;
+
+       test = 1;
+       subtest = memset16_selftest();
+       if (subtest)
+               goto fail;
+
+       test = 2;
+       subtest = memset32_selftest();
+       if (subtest)
+               goto fail;
+
+       test = 3;
+       subtest = memset64_selftest();
+       if (subtest)
+               goto fail;
+
+       pr_info("String selftests succeeded\n");
+       return 0;
+fail:
+       pr_crit("String selftest failure %d.%08x\n", test, subtest);
+       return 0;
+}
+
+module_init(string_selftest_init);
+MODULE_LICENSE("GPL v2");
index 1746bae94d416f6ce3311c569e5d99080173f998..01c3957b2de621ae21fed6162058c37f801ff526 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/uuid.h>
 #include <linux/of.h>
 #include <net/addrconf.h>
+#include <linux/siphash.h>
+#include <linux/compiler.h>
 #ifdef CONFIG_BLOCK
 #include <linux/blkdev.h>
 #endif
@@ -1343,6 +1345,59 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        return string(buf, end, uuid, spec);
 }
 
+int kptr_restrict __read_mostly;
+
+static noinline_for_stack
+char *restricted_pointer(char *buf, char *end, const void *ptr,
+                        struct printf_spec spec)
+{
+       spec.base = 16;
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = 2 * sizeof(ptr);
+               spec.flags |= ZEROPAD;
+       }
+
+       switch (kptr_restrict) {
+       case 0:
+               /* Always print %pK values */
+               break;
+       case 1: {
+               const struct cred *cred;
+
+               /*
+                * kptr_restrict==1 cannot be used in IRQ context
+                * because its test for CAP_SYSLOG would be meaningless.
+                */
+               if (in_irq() || in_serving_softirq() || in_nmi())
+                       return string(buf, end, "pK-error", spec);
+
+               /*
+                * Only print the real pointer value if the current
+                * process has CAP_SYSLOG and is running with the
+                * same credentials it started with. This is because
+                * access to files is checked at open() time, but %pK
+                * checks permission at read() time. We don't want to
+                * leak pointer values if a binary opens a file using
+                * %pK and then elevates privileges before reading it.
+                */
+               cred = current_cred();
+               if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+                   !uid_eq(cred->euid, cred->uid) ||
+                   !gid_eq(cred->egid, cred->gid))
+                       ptr = NULL;
+               break;
+       }
+       case 2:
+       default:
+               /* Always print 0's for %pK */
+               ptr = NULL;
+               break;
+       }
+
+       return number(buf, end, (unsigned long)ptr, spec);
+}
+
 static noinline_for_stack
 char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
 {
@@ -1591,7 +1646,86 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
        return widen_string(buf, buf - buf_start, end, spec);
 }
 
-int kptr_restrict __read_mostly;
+static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+                    struct printf_spec spec)
+{
+       spec.base = 16;
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = 2 * sizeof(ptr);
+               spec.flags |= ZEROPAD;
+       }
+
+       return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+static bool have_filled_random_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+       get_random_bytes(&ptr_key, sizeof(ptr_key));
+       /*
+        * have_filled_random_ptr_key==true is dependent on get_random_bytes().
+        * ptr_to_id() needs to see have_filled_random_ptr_key==true
+        * after get_random_bytes() returns.
+        */
+       smp_mb();
+       WRITE_ONCE(have_filled_random_ptr_key, true);
+}
+
+static struct random_ready_callback random_ready = {
+       .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+       int ret = add_random_ready_callback(&random_ready);
+
+       if (!ret) {
+               return 0;
+       } else if (ret == -EALREADY) {
+               fill_random_ptr_key(&random_ready);
+               return 0;
+       }
+
+       return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
+{
+       unsigned long hashval;
+       const int default_width = 2 * sizeof(ptr);
+
+       if (unlikely(!have_filled_random_ptr_key)) {
+               spec.field_width = default_width;
+               /* string length must be less than default_width */
+               return string(buf, end, "(ptrval)", spec);
+       }
+
+#ifdef CONFIG_64BIT
+       hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+       /*
+        * Mask off the first 32 bits, this makes explicit that we have
+        * modified the address (and 32 bits is plenty for a unique ID).
+        */
+       hashval = hashval & 0xffffffff;
+#else
+       hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+
+       spec.flags |= SMALL;
+       if (spec.field_width == -1) {
+               spec.field_width = default_width;
+               spec.flags |= ZEROPAD;
+       }
+       spec.base = 16;
+
+       return number(buf, end, hashval, spec);
+}
 
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
@@ -1698,11 +1832,16 @@ int kptr_restrict __read_mostly;
  *                        c major compatible string
  *                        C full compatible string
  *
+ * - 'x' For printing the address. Equivalent to "%lx".
+ *
  * ** Please update also Documentation/printk-formats.txt when making changes **
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
+ *
+ * Note: The default behaviour (unadorned %p) is to hash the address,
+ * rendering it useful as a unique identifier.
  */
 static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -1792,47 +1931,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                        return buf;
                }
        case 'K':
-               switch (kptr_restrict) {
-               case 0:
-                       /* Always print %pK values */
-                       break;
-               case 1: {
-                       const struct cred *cred;
-
-                       /*
-                        * kptr_restrict==1 cannot be used in IRQ context
-                        * because its test for CAP_SYSLOG would be meaningless.
-                        */
-                       if (in_irq() || in_serving_softirq() || in_nmi()) {
-                               if (spec.field_width == -1)
-                                       spec.field_width = default_width;
-                               return string(buf, end, "pK-error", spec);
-                       }
-
-                       /*
-                        * Only print the real pointer value if the current
-                        * process has CAP_SYSLOG and is running with the
-                        * same credentials it started with. This is because
-                        * access to files is checked at open() time, but %pK
-                        * checks permission at read() time. We don't want to
-                        * leak pointer values if a binary opens a file using
-                        * %pK and then elevates privileges before reading it.
-                        */
-                       cred = current_cred();
-                       if (!has_capability_noaudit(current, CAP_SYSLOG) ||
-                           !uid_eq(cred->euid, cred->uid) ||
-                           !gid_eq(cred->egid, cred->gid))
-                               ptr = NULL;
-                       break;
-               }
-               case 2:
-               default:
-                       /* Always print 0's for %pK */
-                       ptr = NULL;
+               if (!kptr_restrict)
                        break;
-               }
-               break;
-
+               return restricted_pointer(buf, end, ptr, spec);
        case 'N':
                return netdev_bits(buf, end, ptr, fmt);
        case 'a':
@@ -1857,15 +1958,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                case 'F':
                        return device_node_string(buf, end, ptr, spec, fmt + 1);
                }
+       case 'x':
+               return pointer_string(buf, end, ptr, spec);
        }
-       spec.flags |= SMALL;
-       if (spec.field_width == -1) {
-               spec.field_width = default_width;
-               spec.flags |= ZEROPAD;
-       }
-       spec.base = 16;
 
-       return number(buf, end, (unsigned long) ptr, spec);
+       /* default is to _not_ leak addresses, hash before printing */
+       return ptr_to_id(buf, end, ptr, spec);
 }
 
 /*
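
Taken together, the vsprintf changes above make an unadorned %p print a per-boot siphash of the pointer (or "(ptrval)" until the random key is ready), while %pK keeps its kptr_restrict policy and the new %px prints the raw value. A minimal in-kernel sketch of the three, assuming a context where exposing the raw address via %px is acceptable:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static void pointer_format_demo(void)
    {
            void *p = kmalloc(16, GFP_KERNEL);

            pr_info("hashed:     %p\n",  p); /* 32-bit unique id, zero-padded to pointer width */
            pr_info("raw:        %px\n", p); /* new 'x' extension, equivalent to %lx */
            pr_info("restricted: %pK\n", p); /* raw, zeroed or "pK-error" depending on kptr_restrict */
            kfree(p);
    }
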
index 9c4bdddd80c2123ac522c7d6c08453cca6c8445f..03ff7703d3228e66571571a264e3b115d0dd6233 100644 (file)
@@ -756,3 +756,12 @@ config PERCPU_STATS
          This feature collects and exposes statistics via debugfs. The
          information includes global and per chunk statistics, which can
          be used to help understand percpu memory usage.
+
+config GUP_BENCHMARK
+       bool "Enable infrastructure for get_user_pages_fast() benchmarking"
+       default n
+       help
+         Provides /sys/kernel/debug/gup_benchmark that helps with testing
+         performance of get_user_pages_fast().
+
+         See tools/testing/selftests/vm/gup_benchmark.c
index e7ebd176fb935e382c9da55c39fde038f256c9e5..e669f02c5a5425f00b57eef345c52d05b69a014e 100644 (file)
@@ -80,6 +80,7 @@ obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
 obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
 obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o
 obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
+obj-$(CONFIG_GUP_BENCHMARK) += gup_benchmark.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
 obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
index 85395dc6eb137f128961a4429ed69b8e030ca816..10cd757f1006b8646b6cf1a386b8b0e5b5d049f0 100644 (file)
@@ -218,6 +218,24 @@ static void reset_cached_positions(struct zone *zone)
                                pageblock_start_pfn(zone_end_pfn(zone) - 1);
 }
 
+/*
+ * Compound pages of >= pageblock_order should consistently be skipped until
+ * released. It is always pointless to compact pages of such order (if they are
+ * migratable), and the pageblocks they occupy cannot contain any free pages.
+ */
+static bool pageblock_skip_persistent(struct page *page)
+{
+       if (!PageCompound(page))
+               return false;
+
+       page = compound_head(page);
+
+       if (compound_order(page) >= pageblock_order)
+               return true;
+
+       return false;
+}
+
 /*
  * This function is called to clear all cached information on pageblocks that
  * should be skipped for page isolation when the migrate and free page scanner
@@ -242,6 +260,8 @@ static void __reset_isolation_suitable(struct zone *zone)
                        continue;
                if (zone != page_zone(page))
                        continue;
+               if (pageblock_skip_persistent(page))
+                       continue;
 
                clear_pageblock_skip(page);
        }
@@ -275,7 +295,7 @@ static void update_pageblock_skip(struct compact_control *cc,
        struct zone *zone = cc->zone;
        unsigned long pfn;
 
-       if (cc->ignore_skip_hint)
+       if (cc->no_set_skip_hint)
                return;
 
        if (!page)
@@ -307,7 +327,12 @@ static inline bool isolation_suitable(struct compact_control *cc,
        return true;
 }
 
-static void update_pageblock_skip(struct compact_control *cc,
+static inline bool pageblock_skip_persistent(struct page *page)
+{
+       return false;
+}
+
+static inline void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
                        bool migrate_scanner)
 {
@@ -449,13 +474,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                 * and the only danger is skipping too much.
                 */
                if (PageCompound(page)) {
-                       unsigned int comp_order = compound_order(page);
+                       const unsigned int order = compound_order(page);
 
-                       if (likely(comp_order < MAX_ORDER)) {
-                               blockpfn += (1UL << comp_order) - 1;
-                               cursor += (1UL << comp_order) - 1;
+                       if (likely(order < MAX_ORDER)) {
+                               blockpfn += (1UL << order) - 1;
+                               cursor += (1UL << order) - 1;
                        }
-
                        goto isolate_fail;
                }
 
@@ -772,11 +796,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                 * danger is skipping too much.
                 */
                if (PageCompound(page)) {
-                       unsigned int comp_order = compound_order(page);
-
-                       if (likely(comp_order < MAX_ORDER))
-                               low_pfn += (1UL << comp_order) - 1;
+                       const unsigned int order = compound_order(page);
 
+                       if (likely(order < MAX_ORDER))
+                               low_pfn += (1UL << order) - 1;
                        goto isolate_fail;
                }
 
@@ -1928,9 +1951,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                .total_free_scanned = 0,
                .classzone_idx = pgdat->kcompactd_classzone_idx,
                .mode = MIGRATE_SYNC_LIGHT,
-               .ignore_skip_hint = true,
+               .ignore_skip_hint = false,
                .gfp_mask = GFP_KERNEL,
-
        };
        trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
                                                        cc.classzone_idx);
index 2f98df0d460eef41f80586544ad98abb66fae60c..297c7238f7d4094a6ac4ab0dc72e04abb870972f 100644 (file)
@@ -53,6 +53,18 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
                ret = -EFAULT;
                goto out;
        }
+
+       /*
+        * While get_vaddr_frames() could be used for transient (kernel
+        * controlled lifetime) pinning of memory pages, all current
+        * users establish long term (userspace controlled lifetime)
+        * page pinning. Treat get_vaddr_frames() like
+        * get_user_pages_longterm() and disallow it for filesystem-dax
+        * mappings.
+        */
+       if (vma_is_fsdax(vma))
+               return -EOPNOTSUPP;
+
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
                vec->got_ref = true;
                vec->is_pfns = false;
index dfcde13f289a76ddcb54919f900467aeab15609d..d3fb60e5bfacd4c733957dc526c28c41bd2321d1 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  */
 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 {
-       return pte_write(pte) ||
+       return pte_access_permitted(pte, WRITE) ||
                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
 }
 
@@ -1095,6 +1095,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 }
 EXPORT_SYMBOL(get_user_pages);
 
+#ifdef CONFIG_FS_DAX
+/*
+ * This is the same as get_user_pages() in that it assumes we are
+ * operating on the current task's mm, but it goes further to validate
+ * that the vmas associated with the address range are suitable for
+ * longterm elevated page reference counts. For example, filesystem-dax
+ * mappings are subject to the lifetime enforced by the filesystem and
+ * we need guarantees that longterm users like RDMA and V4L2 only
+ * establish mappings that have a kernel enforced revocation mechanism.
+ *
+ * "longterm" == userspace controlled elevated page count lifetime.
+ * Contrast this to iov_iter_get_pages() usages which are transient.
+ */
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+               unsigned int gup_flags, struct page **pages,
+               struct vm_area_struct **vmas_arg)
+{
+       struct vm_area_struct **vmas = vmas_arg;
+       struct vm_area_struct *vma_prev = NULL;
+       long rc, i;
+
+       if (!pages)
+               return -EINVAL;
+
+       if (!vmas) {
+               vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
+                              GFP_KERNEL);
+               if (!vmas)
+                       return -ENOMEM;
+       }
+
+       rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+
+       for (i = 0; i < rc; i++) {
+               struct vm_area_struct *vma = vmas[i];
+
+               if (vma == vma_prev)
+                       continue;
+
+               vma_prev = vma;
+
+               if (vma_is_fsdax(vma))
+                       break;
+       }
+
+       /*
+        * Either get_user_pages() failed, or the vma validation
+        * succeeded, in either case we don't need to put_page() before
+        * returning.
+        */
+       if (i >= rc)
+               goto out;
+
+       for (i = 0; i < rc; i++)
+               put_page(pages[i]);
+       rc = -EOPNOTSUPP;
+out:
+       if (vmas != vmas_arg)
+               kfree(vmas);
+       return rc;
+}
+EXPORT_SYMBOL(get_user_pages_longterm);
+#endif /* CONFIG_FS_DAX */
+
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
  * @vma:   target vma
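
A hypothetical caller sketch for the new helper (not part of this patch, and assuming the declaration lands in linux/mm.h), holding mmap_sem as get_user_pages() requires and treating the fs-dax refusal as a distinct error:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* illustrative only: pin a user buffer for long-lived DMA */
    static long pin_user_buffer(unsigned long uaddr, unsigned long npages,
                                struct page **pages)
    {
            long pinned;

            down_read(&current->mm->mmap_sem);
            pinned = get_user_pages_longterm(uaddr, npages, FOLL_WRITE,
                                             pages, NULL);
            up_read(&current->mm->mmap_sem);

            if (pinned == -EOPNOTSUPP)
                    pr_warn("fs-dax backed buffer: long-term pinning refused\n");
            return pinned;
    }
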
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
new file mode 100644 (file)
index 0000000..5c8e2ab
--- /dev/null
@@ -0,0 +1,100 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+
+#define GUP_FAST_BENCHMARK     _IOWR('g', 1, struct gup_benchmark)
+
+struct gup_benchmark {
+       __u64 delta_usec;
+       __u64 addr;
+       __u64 size;
+       __u32 nr_pages_per_call;
+       __u32 flags;
+};
+
+static int __gup_benchmark_ioctl(unsigned int cmd,
+               struct gup_benchmark *gup)
+{
+       ktime_t start_time, end_time;
+       unsigned long i, nr, nr_pages, addr, next;
+       struct page **pages;
+
+       nr_pages = gup->size / PAGE_SIZE;
+       pages = kvmalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+       if (!pages)
+               return -ENOMEM;
+
+       i = 0;
+       nr = gup->nr_pages_per_call;
+       start_time = ktime_get();
+       for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) {
+               if (nr != gup->nr_pages_per_call)
+                       break;
+
+               next = addr + nr * PAGE_SIZE;
+               if (next > gup->addr + gup->size) {
+                       next = gup->addr + gup->size;
+                       nr = (next - addr) / PAGE_SIZE;
+               }
+
+               nr = get_user_pages_fast(addr, nr, gup->flags & 1, pages + i);
+               i += nr;
+       }
+       end_time = ktime_get();
+
+       gup->delta_usec = ktime_us_delta(end_time, start_time);
+       gup->size = addr - gup->addr;
+
+       for (i = 0; i < nr_pages; i++) {
+               if (!pages[i])
+                       break;
+               put_page(pages[i]);
+       }
+
+       kvfree(pages);
+       return 0;
+}
+
+static long gup_benchmark_ioctl(struct file *filep, unsigned int cmd,
+               unsigned long arg)
+{
+       struct gup_benchmark gup;
+       int ret;
+
+       if (cmd != GUP_FAST_BENCHMARK)
+               return -EINVAL;
+
+       if (copy_from_user(&gup, (void __user *)arg, sizeof(gup)))
+               return -EFAULT;
+
+       ret = __gup_benchmark_ioctl(cmd, &gup);
+       if (ret)
+               return ret;
+
+       if (copy_to_user((void __user *)arg, &gup, sizeof(gup)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static const struct file_operations gup_benchmark_fops = {
+       .open = nonseekable_open,
+       .unlocked_ioctl = gup_benchmark_ioctl,
+};
+
+static int gup_benchmark_init(void)
+{
+       void *ret;
+
+       ret = debugfs_create_file_unsafe("gup_benchmark", 0600, NULL, NULL,
+                       &gup_benchmark_fops);
+       if (!ret)
+               pr_warn("Failed to create gup_benchmark in debugfs");
+
+       return 0;
+}
+
+late_initcall(gup_benchmark_init);
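
The benchmark is driven from userspace through the debugfs node named in the mm/Kconfig help; the in-tree client is tools/testing/selftests/vm/gup_benchmark.c. A minimal sketch mirroring the ioctl layout above (the mapping size and pages-per-call values are arbitrary assumptions):

    /* userspace sketch: time get_user_pages_fast() over a 128 MiB anonymous mapping */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/types.h>

    struct gup_benchmark {
            __u64 delta_usec;
            __u64 addr;
            __u64 size;
            __u32 nr_pages_per_call;
            __u32 flags;
    };
    #define GUP_FAST_BENCHMARK _IOWR('g', 1, struct gup_benchmark)

    int main(void)
    {
            struct gup_benchmark gup = { .size = 128UL << 20, .nr_pages_per_call = 1024 };
            int fd = open("/sys/kernel/debug/gup_benchmark", O_RDWR);
            void *buf = mmap(NULL, gup.size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (fd < 0 || buf == MAP_FAILED)
                    return 1;
            gup.addr = (unsigned long)buf;
            if (ioctl(fd, GUP_FAST_BENCHMARK, &gup))
                    return 1;
            printf("gup_fast: %llu usec over %llu bytes\n",
                   (unsigned long long)gup.delta_usec,
                   (unsigned long long)gup.size);
            return 0;
    }
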
index ea19742a5d60b1a6270629a024d88a13b9c5f3c1..3a5c172af56039bb26007ea4cc5ec4ca0a9bf659 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -391,11 +391,11 @@ again:
                if (pmd_protnone(pmd))
                        return hmm_vma_walk_clear(start, end, walk);
 
-               if (write_fault && !pmd_write(pmd))
+               if (!pmd_access_permitted(pmd, write_fault))
                        return hmm_vma_walk_clear(start, end, walk);
 
                pfn = pmd_pfn(pmd) + pte_index(addr);
-               flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
+               flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0;
                for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                        pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
                return 0;
@@ -456,11 +456,11 @@ again:
                        continue;
                }
 
-               if (write_fault && !pte_write(pte))
+               if (!pte_access_permitted(pte, write_fault))
                        goto fault;
 
                pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
-               pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
+               pfns[i] |= pte_access_permitted(pte, WRITE) ? HMM_PFN_WRITE : 0;
                continue;
 
 fault:
index 86fe697e8bfb3c4e8393b23a09f177965450ad93..2f2f5e77490278f58c6e9a923899255efff77551 100644 (file)
@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd)
+               pmd_t *pmd, int flags)
 {
        pmd_t _pmd;
 
-       /*
-        * We should set the dirty bit only for FOLL_WRITE but for now
-        * the dirty bit in the pmd is meaningless.  And if the dirty
-        * bit will become meaningful and we'll only set it with
-        * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-        * set the young bit, instead of the current set_pmd_at.
-        */
-       _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+       _pmd = pmd_mkyoung(*pmd);
+       if (flags & FOLL_WRITE)
+               _pmd = pmd_mkdirty(_pmd);
        if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-                               pmd, _pmd,  1))
+                               pmd, _pmd, flags & FOLL_WRITE))
                update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -875,7 +870,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
         */
        WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
 
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
                return NULL;
 
        if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd);
+               touch_pmd(vma, addr, pmd, flags);
 
        /*
         * device mapped pages can only be returned if the
@@ -995,20 +990,15 @@ out:
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
-               pud_t *pud)
+               pud_t *pud, int flags)
 {
        pud_t _pud;
 
-       /*
-        * We should set the dirty bit only for FOLL_WRITE but for now
-        * the dirty bit in the pud is meaningless.  And if the dirty
-        * bit will become meaningful and we'll only set it with
-        * FOLL_WRITE, an atomic set_bit will be required on the pud to
-        * set the young bit, instead of the current set_pud_at.
-        */
-       _pud = pud_mkyoung(pud_mkdirty(*pud));
+       _pud = pud_mkyoung(*pud);
+       if (flags & FOLL_WRITE)
+               _pud = pud_mkdirty(_pud);
        if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
-                               pud, _pud,  1))
+                               pud, _pud, flags & FOLL_WRITE))
                update_mmu_cache_pud(vma, addr, pud);
 }
 
@@ -1022,7 +1012,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pud_lockptr(mm, pud));
 
-       if (flags & FOLL_WRITE && !pud_write(*pud))
+       if (!pud_access_permitted(*pud, flags & FOLL_WRITE))
                return NULL;
 
        if (pud_present(*pud) && pud_devmap(*pud))
@@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
 
        if (flags & FOLL_TOUCH)
-               touch_pud(vma, addr, pud);
+               touch_pud(vma, addr, pud, flags);
 
        /*
         * device mapped pages can only be returned if the
@@ -1396,7 +1386,7 @@ out_unlock:
  */
 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
 {
-       return pmd_write(pmd) ||
+       return pmd_access_permitted(pmd, WRITE) ||
               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
 }
 
@@ -1424,7 +1414,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        page = pmd_page(*pmd);
        VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_TOUCH)
-               touch_pmd(vma, addr, pmd);
+               touch_pmd(vma, addr, pmd, flags);
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                /*
                 * We don't mlock() pte-mapped THPs. This way we can avoid
index 681b300185c0c0383bb240d6a898849bf777f46b..9a334f5fb730873190a57648bc0f040f91ac0ed6 100644 (file)
@@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
        }
 }
 
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       if (addr & ~(huge_page_mask(hstate_vma(vma))))
+               return -EINVAL;
+       return 0;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
        .open = hugetlb_vm_op_open,
        .close = hugetlb_vm_op_close,
+       .split = hugetlb_vm_op_split,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
@@ -4627,7 +4635,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, addr);
-       p4d = p4d_offset(pgd, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
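The new vm_operations_struct .split callback introduced here lets the owner of a VMA reject splits at unsuitable addresses instead of __split_vma() hard-coding the hugetlb alignment check (see the mm/mmap.c hunk further down). A hedged sketch of how a hypothetical driver could use the same hook; all names below are invented for illustration:

#include <linux/mm.h>
#include <linux/errno.h>

/* Driver that maps 2 MiB chunks and must not be split mid-chunk. */
#define EXAMPLE_CHUNK_SIZE	(2UL << 20)

static int example_vm_split(struct vm_area_struct *vma, unsigned long addr)
{
	if (addr & (EXAMPLE_CHUNK_SIZE - 1))
		return -EINVAL;		/* refuse to split inside a chunk */
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.split = example_vm_split,
	/* .fault, .open, .close as usual */
};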
index 1df011f624801ffbdf3379a6d17218d9b32280e1..e6bd35182daee1226b684464fd202df86762c636 100644 (file)
@@ -198,6 +198,7 @@ struct compact_control {
        const int classzone_idx;        /* zone index of a direct compactor */
        enum migrate_mode mode;         /* Async or sync migration mode */
        bool ignore_skip_hint;          /* Scan blocks even if marked skip */
+       bool no_set_skip_hint;          /* Don't mark blocks for skipping */
        bool ignore_block_suitable;     /* Scan blocks considered unsuitable */
        bool direct_compaction;         /* False from kcompactd or /proc/... */
        bool whole_zone;                /* Whole zone should/has been scanned */
index 6bcfb01ba0386e5bf2ec49512321e59a1fac0661..410c8235e671501ab6f6876a13b419b448cc624c 100644 (file)
@@ -134,7 +134,7 @@ static void print_error_description(struct kasan_access_info *info)
 
        pr_err("BUG: KASAN: %s in %pS\n",
                bug_type, (void *)info->ip);
-       pr_err("%s of size %zu at addr %p by task %s/%d\n",
+       pr_err("%s of size %zu at addr %px by task %s/%d\n",
                info->is_write ? "Write" : "Read", info->access_size,
                info->access_addr, current->comm, task_pid_nr(current));
 }
@@ -206,7 +206,7 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
        const char *rel_type;
        int rel_bytes;
 
-       pr_err("The buggy address belongs to the object at %p\n"
+       pr_err("The buggy address belongs to the object at %px\n"
               " which belongs to the cache %s of size %d\n",
                object, cache->name, cache->object_size);
 
@@ -225,7 +225,7 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
        }
 
        pr_err("The buggy address is located %d bytes %s of\n"
-              " %d-byte region [%p, %p)\n",
+              " %d-byte region [%px, %px)\n",
                rel_bytes, rel_type, cache->object_size, (void *)object_addr,
                (void *)(object_addr + cache->object_size));
 }
@@ -302,7 +302,7 @@ static void print_shadow_for_address(const void *addr)
                char shadow_buf[SHADOW_BYTES_PER_ROW];
 
                snprintf(buffer, sizeof(buffer),
-                       (i == 0) ? ">%p: " : " %p: ", kaddr);
+                       (i == 0) ? ">%px: " : " %px: ", kaddr);
                /*
                 * We should not pass a shadow pointer to generic
                 * function, because generic functions may try to
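The KASAN report hunks switch from %p to %px: since this release cycle %p prints a hashed pointer value to avoid leaking kernel addresses, while %px prints the raw pointer, which is what a memory-corruption report needs. A small illustration (function name invented):

#include <linux/printk.h>

static void print_addr_example(void *obj)
{
	/* %p hashes the pointer; %px prints it verbatim.  Use %px only
	 * where the raw address is itself the debugging payload.
	 */
	pr_info("hashed:   %p\n", obj);
	pr_info("verbatim: %px\n", obj);
}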
index e4738d5e9b8c5214c106756b311e102eaf2cdad1..3d4781756d50fef924f52c0d9cb6cb0cbddd479f 100644 (file)
@@ -1523,6 +1523,8 @@ static void kmemleak_scan(void)
                        if (page_count(page) == 0)
                                continue;
                        scan_block(page, page + 1, NULL);
+                       if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
+                               cond_resched();
                }
        }
        put_online_mems();
index 375cf32087e4a2da0c42b251a1d5538ffaa1c857..751e97aa22106f9be73919033271ad9f98498fca 100644 (file)
@@ -276,15 +276,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
 {
        struct file *file = vma->vm_file;
 
+       *prev = vma;
 #ifdef CONFIG_SWAP
        if (!file) {
-               *prev = vma;
                force_swapin_readahead(vma, start, end);
                return 0;
        }
 
        if (shmem_mapping(file->f_mapping)) {
-               *prev = vma;
                force_shm_swapin_readahead(vma, start, end,
                                        file->f_mapping);
                return 0;
@@ -299,7 +298,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
                return 0;
        }
 
-       *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
index 50e6906314f8d9c987181744c9a45b54872edfb6..ac2ffd5e02b914fb9564649c9475babc51119de6 100644 (file)
@@ -6044,7 +6044,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        memcg_check_events(memcg, page);
 
        if (!mem_cgroup_is_root(memcg))
-               css_put(&memcg->css);
+               css_put_many(&memcg->css, nr_entries);
 }
 
 /**
index 85e7a87da79fe4a5487e1f3f6216e61b9827515c..5eb3d2524bdc28239b33a0ac6e385fa5a5b9aaf9 100644 (file)
@@ -3948,7 +3948,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
        if (unlikely(!pte_same(*vmf->pte, entry)))
                goto unlock;
        if (vmf->flags & FAULT_FLAG_WRITE) {
-               if (!pte_write(entry))
+               if (!pte_access_permitted(entry, WRITE))
                        return do_wp_page(vmf);
                entry = pte_mkdirty(entry);
        }
@@ -4013,7 +4013,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
                        /* NUMA case for anonymous PUDs would go here */
 
-                       if (dirty && !pud_write(orig_pud)) {
+                       if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
                                ret = wp_huge_pud(&vmf, orig_pud);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
@@ -4046,7 +4046,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
                        if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&vmf, orig_pmd);
 
-                       if (dirty && !pmd_write(orig_pmd)) {
+                       if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
                                ret = wp_huge_pmd(&vmf, orig_pmd);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
@@ -4336,7 +4336,7 @@ int follow_phys(struct vm_area_struct *vma,
                goto out;
        pte = *ptep;
 
-       if ((flags & FOLL_WRITE) && !pte_write(pte))
+       if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                goto unlock;
 
        *prot = pgprot_val(pte_pgprot(pte));
index 924839fac0e6421a77839825a99833a342d3153c..a4d5468212149db8a4cf20f9917c7bf48231a9ce 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2555,9 +2555,11 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *new;
        int err;
 
-       if (is_vm_hugetlb_page(vma) && (addr &
-                                       ~(huge_page_mask(hstate_vma(vma)))))
-               return -EINVAL;
+       if (vma->vm_ops && vma->vm_ops->split) {
+               err = vma->vm_ops->split(vma, addr);
+               if (err)
+                       return err;
+       }
 
        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
index c86fbd1b590ecda69741d4c1d9a9c0875d98ee69..c957be32b27a9e7a17a6e33e69a31b1b6fa8e820 100644 (file)
@@ -550,7 +550,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
         */
        set_bit(MMF_UNSTABLE, &mm->flags);
 
-       tlb_gather_mmu(&tlb, mm, 0, -1);
        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
                if (!can_madv_dontneed_vma(vma))
                        continue;
@@ -565,11 +564,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
                 * we do not want to block exit_mmap by keeping mm ref
                 * count elevated without a good reason.
                 */
-               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
+               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+                       tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
                        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
                                         NULL);
+                       tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+               }
        }
-       tlb_finish_mmu(&tlb, 0, -1);
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                        task_pid_nr(tsk), tsk->comm,
                        K(get_mm_counter(mm, MM_ANONPAGES)),
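The OOM-reaper hunk above moves from one mm-wide tlb_gather_mmu(&tlb, mm, 0, -1) to a gather/finish pair per eligible VMA, so the batched TLB state covers only the range actually unmapped. A simplified, hedged sketch of the per-range pattern (error handling and locking omitted; reap_example is an invented name):

#include <linux/mm.h>
#include <asm/tlb.h>

static void reap_example(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct mmu_gather tlb;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
		unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL);
		tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
	}
}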
index 8a1551154285d764207a641aed035f7e13c11b14..586f31261c8328e30106254e09e52fa6e93f410e 100644 (file)
@@ -433,11 +433,8 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        else
                bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-       if (unlikely(bg_thresh >= thresh)) {
-               pr_warn("vm direct limit must be set greater than background limit.\n");
+       if (bg_thresh >= thresh)
                bg_thresh = thresh / 2;
-       }
-
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
@@ -1993,11 +1990,12 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_BLOCK
-void laptop_mode_timer_fn(unsigned long data)
+void laptop_mode_timer_fn(struct timer_list *t)
 {
-       struct request_queue *q = (struct request_queue *)data;
+       struct backing_dev_info *backing_dev_info =
+               from_timer(backing_dev_info, t, laptop_mode_wb_timer);
 
-       wakeup_flusher_threads_bdi(q->backing_dev_info, WB_REASON_LAPTOP_TIMER);
+       wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
 }
 
 /*
index 55ded92f9809e0ff798589658e87bd1c85bc69a0..73f5d4556b3d0b7218bea0cb9bb0fdd1f1cb3cdd 100644 (file)
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
        if (WARN_ON_ONCE(!mm_percpu_wq))
                return;
 
-       /* Workqueues cannot recurse */
-       if (current->flags & PF_WQ_WORKER)
-               return;
-
        /*
         * Do not drain if one is already in progress unless it's specific to
         * a zone. Such callers are primarily CMA and memory hotplug and need
@@ -7619,6 +7615,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
+               .no_set_skip_hint = true,
                .gfp_mask = current_gfp_context(gfp_mask),
        };
        INIT_LIST_HEAD(&cc.migratepages);
@@ -7655,11 +7652,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /*
         * In case of -EBUSY, we'd like to know which page causes problem.
-        * So, just fall through. We will check it in test_pages_isolated().
+        * So, just fall through. test_pages_isolated() has a tracepoint
+        * which will report the busy page.
+        *
+        * It is possible that busy pages could become available before
+        * the call to test_pages_isolated, and the range will actually be
+        * allocated.  So, if we fall through, be sure to clear ret so that
+        * -EBUSY is not accidentally used or returned to caller.
         */
        ret = __alloc_contig_migrate_range(&cc, start, end);
        if (ret && ret != -EBUSY)
                goto done;
+       ret = 0;
 
        /*
         * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
index 1f97d77551c3e539f303758693497705c424a4ab..7fbe67be86fa816b13d06603c098ccf10cca4fe2 100644 (file)
@@ -3202,7 +3202,6 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
        int len;
        struct inode *inode;
        struct page *page;
-       struct shmem_inode_info *info;
 
        len = strlen(symname) + 1;
        if (len > PAGE_SIZE)
@@ -3222,7 +3221,6 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                error = 0;
        }
 
-       info = SHMEM_I(inode);
        inode->i_size = len-1;
        if (len <= SHORT_SYMLINK_LEN) {
                inode->i_link = kmemdup(symname, len, GFP_KERNEL);
@@ -3778,7 +3776,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
         * tmpfs instance, limiting inodes to one per page of lowmem;
         * but the internal instance is left unlimited.
         */
-       if (!(sb->s_flags & MS_KERNMOUNT)) {
+       if (!(sb->s_flags & SB_KERNMOUNT)) {
                sbinfo->max_blocks = shmem_default_max_blocks();
                sbinfo->max_inodes = shmem_default_max_inodes();
                if (shmem_parse_options(data, sbinfo, false)) {
@@ -3786,12 +3784,12 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed;
                }
        } else {
-               sb->s_flags |= MS_NOUSER;
+               sb->s_flags |= SB_NOUSER;
        }
        sb->s_export_op = &shmem_export_ops;
-       sb->s_flags |= MS_NOSEC;
+       sb->s_flags |= SB_NOSEC;
 #else
-       sb->s_flags |= MS_NOUSER;
+       sb->s_flags |= SB_NOUSER;
 #endif
 
        spin_lock_init(&sbinfo->stat_lock);
@@ -3811,7 +3809,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_xattr = shmem_xattr_handlers;
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-       sb->s_flags |= MS_POSIXACL;
+       sb->s_flags |= SB_POSIXACL;
 #endif
        uuid_gen(&sb->s_uuid);
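The shmem hunks replace the user-visible MS_* mount flags with the superblock-internal SB_* constants; the values are the same, but SB_* is what filesystems are expected to test and set on sb->s_flags while MS_* remains the mount(2) ABI. A minimal illustration of the convention in a hypothetical fill_super (names invented):

#include <linux/fs.h>

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	/* SB_* flags belong on sb->s_flags; MS_* stay in the mount ABI. */
	sb->s_flags |= SB_NOSEC | SB_POSIXACL;
	return 0;
}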
 
index b2ba2ba585f3c0ccb422531ae9dc26909ae463d4..39e19125d6a019439e5795cbf9be5d54ad2d3349 100644 (file)
@@ -404,8 +404,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
                WARN_ON(z3fold_page_trylock(zhdr));
        else
                z3fold_page_lock(zhdr);
-       if (test_bit(PAGE_STALE, &page->private) ||
-           !test_and_clear_bit(NEEDS_COMPACTING, &page->private)) {
+       if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }
@@ -413,6 +412,11 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);
 
+       if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+               atomic64_dec(&pool->pages_nr);
+               return;
+       }
+
        z3fold_compact_page(zhdr);
        unbuddied = get_cpu_ptr(pool->unbuddied);
        fchunks = num_free_chunks(zhdr);
@@ -753,9 +757,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                list_del_init(&zhdr->buddy);
                spin_unlock(&pool->lock);
                zhdr->cpu = -1;
+               kref_get(&zhdr->refcount);
                do_compact_page(zhdr, true);
                return;
        }
+       kref_get(&zhdr->refcount);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
        z3fold_page_unlock(zhdr);
 }
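The z3fold hunks take a reference with kref_get() before handing a header to the compaction worker and drop it inside do_compact_page(), freeing the page if that was the last reference. A hedged sketch of the general take-a-reference-before-queueing pattern, with invented names:

#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct item {
	struct kref refcount;
	struct work_struct work;
};

static void item_release(struct kref *ref)
{
	kfree(container_of(ref, struct item, refcount));
}

static void item_work_fn(struct work_struct *work)
{
	struct item *it = container_of(work, struct item, work);

	/* ... deferred processing ... */
	kref_put(&it->refcount, item_release);	/* drop the queueing ref */
}

static void item_defer(struct item *it)
{
	kref_get(&it->refcount);	/* keep it alive until the worker runs */
	queue_work(system_wq, &it->work);
}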
index 2dac647ff4201fc1dfcf98d05b398422fea7d6d5..7f50d47470bd450566b448b0a3e6ad6fe6d0a000 100644 (file)
@@ -401,9 +401,9 @@ static void garp_join_timer_arm(struct garp_applicant *app)
        mod_timer(&app->join_timer, jiffies + delay);
 }
 
-static void garp_join_timer(unsigned long data)
+static void garp_join_timer(struct timer_list *t)
 {
-       struct garp_applicant *app = (struct garp_applicant *)data;
+       struct garp_applicant *app = from_timer(app, t, join_timer);
 
        spin_lock(&app->lock);
        garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
@@ -584,7 +584,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
        spin_lock_init(&app->lock);
        skb_queue_head_init(&app->queue);
        rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
-       setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
+       timer_setup(&app->join_timer, garp_join_timer, 0);
        garp_join_timer_arm(app);
        return 0;
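This and many of the networking hunks below apply the same 4.15 timer-API conversion: setup_timer(timer, fn, (unsigned long)data) becomes timer_setup(timer, fn, 0), and the callback recovers its containing structure with from_timer() instead of casting an opaque unsigned long. A minimal, self-contained sketch assuming a hypothetical struct widget:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct widget {
	struct timer_list poll_timer;
	int pending;
};

/* New-style callback: receives the timer, not an opaque cookie. */
static void widget_poll(struct timer_list *t)
{
	struct widget *w = from_timer(w, t, poll_timer);

	w->pending = 0;
}

static void widget_init(struct widget *w)
{
	timer_setup(&w->poll_timer, widget_poll, 0);
	mod_timer(&w->poll_timer, jiffies + HZ);
}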
 
index be4dd31653474fbe806d17458773cc587aa56606..a808dd5bbb27a7ff84a1457315dad7d21e87e3c9 100644 (file)
@@ -586,9 +586,9 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
        mod_timer(&app->join_timer, jiffies + delay);
 }
 
-static void mrp_join_timer(unsigned long data)
+static void mrp_join_timer(struct timer_list *t)
 {
-       struct mrp_applicant *app = (struct mrp_applicant *)data;
+       struct mrp_applicant *app = from_timer(app, t, join_timer);
 
        spin_lock(&app->lock);
        mrp_mad_event(app, MRP_EVENT_TX);
@@ -605,9 +605,9 @@ static void mrp_periodic_timer_arm(struct mrp_applicant *app)
                  jiffies + msecs_to_jiffies(mrp_periodic_time));
 }
 
-static void mrp_periodic_timer(unsigned long data)
+static void mrp_periodic_timer(struct timer_list *t)
 {
-       struct mrp_applicant *app = (struct mrp_applicant *)data;
+       struct mrp_applicant *app = from_timer(app, t, periodic_timer);
 
        spin_lock(&app->lock);
        mrp_mad_event(app, MRP_EVENT_PERIODIC);
@@ -865,10 +865,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
        spin_lock_init(&app->lock);
        skb_queue_head_init(&app->queue);
        rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
-       setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
+       timer_setup(&app->join_timer, mrp_join_timer, 0);
        mrp_join_timer_arm(app);
-       setup_timer(&app->periodic_timer, mrp_periodic_timer,
-                   (unsigned long)app);
+       timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
        mrp_periodic_timer_arm(app);
        return 0;
 
index 4674235b0d9b1f77dae3736fd08b1a6a2a417eeb..b433aff5ff13ce9e10cddc204f97f9f19ae6c979 100644 (file)
@@ -82,7 +82,7 @@ int p9_show_client_options(struct seq_file *m, struct p9_client *clnt)
 {
        if (clnt->msize != 8192)
                seq_printf(m, ",msize=%u", clnt->msize);
-       seq_printf(m, "trans=%s", clnt->trans_mod->name);
+       seq_printf(m, ",trans=%s", clnt->trans_mod->name);
 
        switch (clnt->proto_version) {
        case p9_proto_legacy:
@@ -773,8 +773,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
        }
 again:
        /* Wait for the response */
-       err = wait_event_interruptible(*req->wq,
-                                      req->status >= REQ_STATUS_RCVD);
+       err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
 
        /*
         * Make sure our req is coherent with regard to updates in other
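The 9p hunks replace wait_event_interruptible() with wait_event_killable(): an RPC in flight can no longer be abandoned by an ordinary signal (which previously left the transport in an inconsistent state) but is still interruptible by a fatal signal. A hedged sketch of the difference, with an invented wrapper name:

#include <linux/wait.h>
#include <linux/sched/signal.h>

static int wait_for_reply_example(wait_queue_head_t *wq, int *status)
{
	/* wait_event_interruptible(): -ERESTARTSYS on any signal.
	 * wait_event_killable():      -ERESTARTSYS only on a fatal signal
	 * (SIGKILL), so transient signals cannot abandon a request the
	 * server is still processing.
	 */
	return wait_event_killable(*wq, *status != 0);
}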
index 903a190319b94d2c09cce263684017f61d4e6d22..985046ae42312e86505d6fded2fb56501c38536b 100644 (file)
@@ -724,12 +724,12 @@ static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
 {
        if (clnt->trans_mod == &p9_tcp_trans) {
                if (clnt->trans_opts.tcp.port != P9_PORT)
-                       seq_printf(m, "port=%u", clnt->trans_opts.tcp.port);
+                       seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
        } else if (clnt->trans_mod == &p9_fd_trans) {
                if (clnt->trans_opts.fd.rfd != ~0)
-                       seq_printf(m, "rfd=%u", clnt->trans_opts.fd.rfd);
+                       seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
                if (clnt->trans_opts.fd.wfd != ~0)
-                       seq_printf(m, "wfd=%u", clnt->trans_opts.fd.wfd);
+                       seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
        }
        return 0;
 }
index f24b25c25106fb55fb713b7308a8d43413a2143b..f3a4efcf1456422a6c6f036e8b7364f89b407bfd 100644 (file)
@@ -286,8 +286,8 @@ req_retry:
                if (err == -ENOSPC) {
                        chan->ring_bufs_avail = 0;
                        spin_unlock_irqrestore(&chan->lock, flags);
-                       err = wait_event_interruptible(*chan->vc_wq,
-                                                       chan->ring_bufs_avail);
+                       err = wait_event_killable(*chan->vc_wq,
+                                                 chan->ring_bufs_avail);
                        if (err  == -ERESTARTSYS)
                                return err;
 
@@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
                 * Other zc request to finish here
                 */
                if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
-                       err = wait_event_interruptible(vp_wq,
+                       err = wait_event_killable(vp_wq,
                              (atomic_read(&vp_pinned) < chan->p9_max_pages));
                        if (err == -ERESTARTSYS)
                                return err;
@@ -471,8 +471,8 @@ req_retry_pinned:
                if (err == -ENOSPC) {
                        chan->ring_bufs_avail = 0;
                        spin_unlock_irqrestore(&chan->lock, flags);
-                       err = wait_event_interruptible(*chan->vc_wq,
-                                                      chan->ring_bufs_avail);
+                       err = wait_event_killable(*chan->vc_wq,
+                                                 chan->ring_bufs_avail);
                        if (err  == -ERESTARTSYS)
                                goto err_out;
 
@@ -489,8 +489,7 @@ req_retry_pinned:
        virtqueue_kick(chan->vq);
        spin_unlock_irqrestore(&chan->lock, flags);
        p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
-       err = wait_event_interruptible(*req->wq,
-                                      req->status >= REQ_STATUS_RCVD);
+       err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
        /*
         * Non kernel buffers are pinned, unpin them
         */
index 6ad3e043c6174ae97a82525688988e740d87fd29..325c56043007d89886203626a2a139f1ab8fc52a 100644 (file)
@@ -156,8 +156,8 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
        ring = &priv->rings[num];
 
 again:
-       while (wait_event_interruptible(ring->wq,
-                                       p9_xen_write_todo(ring, size)) != 0)
+       while (wait_event_killable(ring->wq,
+                                  p9_xen_write_todo(ring, size)) != 0)
                ;
 
        spin_lock_irqsave(&ring->lock, flags);
index 8ad3ec2610b6499b92b2f3bc97ac02d2d043dd45..309d7dbb36e8476cff412b6be73f93926cf1ec95 100644 (file)
@@ -310,7 +310,7 @@ static void __aarp_expire_device(struct aarp_entry **n, struct net_device *dev)
 }
 
 /* Handle the timer event */
-static void aarp_expire_timeout(unsigned long unused)
+static void aarp_expire_timeout(struct timer_list *unused)
 {
        int ct;
 
@@ -884,7 +884,7 @@ void __init aarp_proto_init(void)
        aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
        if (!aarp_dl)
                printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
-       setup_timer(&aarp_timer, aarp_expire_timeout, 0);
+       timer_setup(&aarp_timer, aarp_expire_timeout, 0);
        aarp_timer.expires  = jiffies + sysctl_aarp_expiry_time;
        add_timer(&aarp_timer);
        register_netdevice_notifier(&aarp_notifier);
index 5d035c1f1156e45540a6bf935341e5799b11ca85..03a9fc0771c084f04ad782c15a502bca13301e2b 100644 (file)
@@ -158,9 +158,9 @@ found:
        return s;
 }
 
-static void atalk_destroy_timer(unsigned long data)
+static void atalk_destroy_timer(struct timer_list *t)
 {
-       struct sock *sk = (struct sock *)data;
+       struct sock *sk = from_timer(sk, t, sk_timer);
 
        if (sk_has_allocations(sk)) {
                sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
@@ -175,8 +175,7 @@ static inline void atalk_destroy_socket(struct sock *sk)
        skb_queue_purge(&sk->sk_receive_queue);
 
        if (sk_has_allocations(sk)) {
-               setup_timer(&sk->sk_timer, atalk_destroy_timer,
-                               (unsigned long)sk);
+               timer_setup(&sk->sk_timer, atalk_destroy_timer, 0);
                sk->sk_timer.expires    = jiffies + SOCK_DESTROY_TIME;
                add_timer(&sk->sk_timer);
        } else
index c976196da3ea1b6a218d19c38475809c8dc3cd96..6676e34332616a1c867a4c8864919fd8bf11b43c 100644 (file)
@@ -1798,7 +1798,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
                else
                        send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL);
                entry->timer.expires = jiffies + (1 * HZ);
-               entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_arp;
+               entry->timer.function = lec_arp_expire_arp;
                add_timer(&entry->timer);
                found = priv->mcast_vcc;
        }
@@ -1998,7 +1998,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
                entry->old_recv_push = old_push;
                entry->status = ESI_UNKNOWN;
                entry->timer.expires = jiffies + priv->vcc_timeout_period;
-               entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_vcc;
+               entry->timer.function = lec_arp_expire_vcc;
                hlist_add_head(&entry->next, &priv->lec_no_forward);
                add_timer(&entry->timer);
                dump_arp_table(priv);
@@ -2082,7 +2082,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
        entry->status = ESI_UNKNOWN;
        hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
        entry->timer.expires = jiffies + priv->vcc_timeout_period;
-       entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_vcc;
+       entry->timer.function = lec_arp_expire_vcc;
        add_timer(&entry->timer);
        pr_debug("After vcc was added\n");
        dump_arp_table(priv);
index e882d8b5db05e889be00fe26a0595458ead470a4..7c6a1cc760a2d075aade51a143cba668a52397dd 100644 (file)
@@ -121,7 +121,7 @@ static struct notifier_block mpoa_notifier = {
 
 struct mpoa_client *mpcs = NULL; /* FIXME */
 static struct atm_mpoa_qos *qos_head = NULL;
-static DEFINE_TIMER(mpc_timer, NULL);
+static DEFINE_TIMER(mpc_timer, mpc_cache_check);
 
 
 static struct mpoa_client *find_mpc_by_itfnum(int itf)
@@ -1413,7 +1413,6 @@ static void mpc_timer_refresh(void)
 {
        mpc_timer.expires = jiffies + (MPC_P2 * HZ);
        checking_time = mpc_timer.expires;
-       mpc_timer.function = (TIMER_FUNC_TYPE)mpc_cache_check;
        add_timer(&mpc_timer);
 }
 
index 4b90033f35a851eb33612a4e09d39de6fc8066ad..15cd2139381e17f0b501dd5294631a1a3315c587 100644 (file)
@@ -488,9 +488,9 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
  * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
  * reset the cwnd to 3*MSS
  */
-static void batadv_tp_sender_timeout(unsigned long arg)
+static void batadv_tp_sender_timeout(struct timer_list *t)
 {
-       struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
+       struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
        struct batadv_priv *bat_priv = tp_vars->bat_priv;
 
        if (atomic_read(&tp_vars->sending) == 0)
@@ -1020,8 +1020,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
        atomic64_set(&tp_vars->tot_sent, 0);
 
        kref_get(&tp_vars->refcount);
-       setup_timer(&tp_vars->timer, batadv_tp_sender_timeout,
-                   (unsigned long)tp_vars);
+       timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0);
 
        tp_vars->bat_priv = bat_priv;
        tp_vars->start_time = jiffies;
@@ -1109,9 +1108,9 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
  *  reached without received ack
  * @arg: address of the related tp_vars
  */
-static void batadv_tp_receiver_shutdown(unsigned long arg)
+static void batadv_tp_receiver_shutdown(struct timer_list *t)
 {
-       struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
+       struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
        struct batadv_tp_unacked *un, *safe;
        struct batadv_priv *bat_priv;
 
@@ -1373,8 +1372,7 @@ batadv_tp_init_recv(struct batadv_priv *bat_priv,
        hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
 
        kref_get(&tp_vars->refcount);
-       setup_timer(&tp_vars->timer, batadv_tp_receiver_shutdown,
-                   (unsigned long)tp_vars);
+       timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0);
 
        batadv_tp_reset_receiver_timer(tp_vars);
 
index 8112893037bdc0afee1247dad7ac6433ac0168bb..f2cec70d520cc2b29606f51a5c2b3c19a7fdc838 100644 (file)
@@ -398,9 +398,9 @@ static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum,
        }
 }
 
-static void hidp_idle_timeout(unsigned long arg)
+static void hidp_idle_timeout(struct timer_list *t)
 {
-       struct hidp_session *session = (struct hidp_session *) arg;
+       struct hidp_session *session = from_timer(session, t, timer);
 
        /* The HIDP user-space API only contains calls to add and remove
         * devices. There is no way to forward events of any kind. Therefore,
@@ -944,8 +944,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
 
        /* device management */
        INIT_WORK(&session->dev_init, hidp_session_dev_work);
-       setup_timer(&session->timer, hidp_idle_timeout,
-                   (unsigned long)session);
+       timer_setup(&session->timer, hidp_idle_timeout, 0);
 
        /* session data */
        mutex_init(&session->report_mutex);
index 4a0b41d75c84833c89fdcd8a4387597cb99df0d2..b98225d65e87a34de2773c41a29ffc15a19db471 100644 (file)
@@ -233,9 +233,9 @@ static int rfcomm_check_security(struct rfcomm_dlc *d)
                                 d->out);
 }
 
-static void rfcomm_session_timeout(unsigned long arg)
+static void rfcomm_session_timeout(struct timer_list *t)
 {
-       struct rfcomm_session *s = (void *) arg;
+       struct rfcomm_session *s = from_timer(s, t, timer);
 
        BT_DBG("session %p state %ld", s, s->state);
 
@@ -258,9 +258,9 @@ static void rfcomm_session_clear_timer(struct rfcomm_session *s)
 }
 
 /* ---- RFCOMM DLCs ---- */
-static void rfcomm_dlc_timeout(unsigned long arg)
+static void rfcomm_dlc_timeout(struct timer_list *t)
 {
-       struct rfcomm_dlc *d = (void *) arg;
+       struct rfcomm_dlc *d = from_timer(d, t, timer);
 
        BT_DBG("dlc %p state %ld", d, d->state);
 
@@ -307,7 +307,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
        if (!d)
                return NULL;
 
-       setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);
+       timer_setup(&d->timer, rfcomm_dlc_timeout, 0);
 
        skb_queue_head_init(&d->tx_queue);
        mutex_init(&d->lock);
@@ -650,7 +650,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
 
        BT_DBG("session %p sock %p", s, sock);
 
-       setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s);
+       timer_setup(&s->timer, rfcomm_session_timeout, 0);
 
        INIT_LIST_HEAD(&s->dlcs);
        s->state = state;
index 795e920a3281939f8f84e76f9f3fc6161146a558..08df57665e1ff62fd3714598c5911833fe24a55b 100644 (file)
@@ -73,9 +73,9 @@ struct sco_pinfo {
 #define SCO_CONN_TIMEOUT       (HZ * 40)
 #define SCO_DISCONN_TIMEOUT    (HZ * 2)
 
-static void sco_sock_timeout(unsigned long arg)
+static void sco_sock_timeout(struct timer_list *t)
 {
-       struct sock *sk = (struct sock *)arg;
+       struct sock *sk = from_timer(sk, t, sk_timer);
 
        BT_DBG("sock %p state %d", sk, sk->sk_state);
 
@@ -487,7 +487,7 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
 
        sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
 
-       setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
+       timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
 
        bt_sock_link(&sco_sk_list, sk);
        return sk;
index d979b3dc49a6b9a2ef9e1ee9c262c9bb9bd9f253..0c59f876fe6f0c48bcf06adf7178d1d3e1528c77 100644 (file)
@@ -221,7 +221,7 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
 
        seq_putc(m, '\n');
 
-       if (net->can.can_stattimer.function == (TIMER_FUNC_TYPE)can_stat_update) {
+       if (net->can.can_stattimer.function == can_stat_update) {
                seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
                                can_stats->total_rx_match_ratio);
 
@@ -291,7 +291,7 @@ static int can_reset_stats_proc_show(struct seq_file *m, void *v)
 
        user_reset = 1;
 
-       if (net->can.can_stattimer.function == (TIMER_FUNC_TYPE)can_stat_update) {
+       if (net->can.can_stattimer.function == can_stat_update) {
                seq_printf(m, "Scheduled statistic reset #%ld.\n",
                                can_pstats->stats_reset + 1);
        } else {
index 67bb1f11e613ca4bcd4f635e096c00c9a61804a5..9a5850f264ed2fa1396fbb35db002e9e50309dbd 100644 (file)
@@ -47,28 +47,38 @@ unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
 
        /* handle the last 11 bytes */
        c = c + length;
-       switch (len) {            /* all the case statements fall through */
+       switch (len) {
        case 11:
                c = c + ((__u32)k[10] << 24);
+               /* fall through */
        case 10:
                c = c + ((__u32)k[9] << 16);
+               /* fall through */
        case 9:
                c = c + ((__u32)k[8] << 8);
                /* the first byte of c is reserved for the length */
+               /* fall through */
        case 8:
                b = b + ((__u32)k[7] << 24);
+               /* fall through */
        case 7:
                b = b + ((__u32)k[6] << 16);
+               /* fall through */
        case 6:
                b = b + ((__u32)k[5] << 8);
+               /* fall through */
        case 5:
                b = b + k[4];
+               /* fall through */
        case 4:
                a = a + ((__u32)k[3] << 24);
+               /* fall through */
        case 3:
                a = a + ((__u32)k[2] << 16);
+               /* fall through */
        case 2:
                a = a + ((__u32)k[1] << 8);
+               /* fall through */
        case 1:
                a = a + k[0];
                /* case 0: nothing left to add */
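The explicit /* fall through */ comments added above satisfy GCC's -Wimplicit-fallthrough warning by documenting that each case deliberately continues into the next, as the Jenkins hash tail handling requires. A tiny illustration of the annotation style (function invented):

/* Deliberate fall-through: each case adds one more byte, then continues. */
static unsigned int sum_tail_example(const unsigned char *k, int len)
{
	unsigned int a = 0;

	switch (len) {
	case 3:
		a += (unsigned int)k[2] << 16;
		/* fall through */
	case 2:
		a += (unsigned int)k[1] << 8;
		/* fall through */
	case 1:
		a += k[0];
	}
	return a;
}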
index 489610ac1cddad2e284515def1bb2185f620d3aa..bf9d079cbafd6e89d56ef5f10d81727bd5cbd42b 100644 (file)
@@ -37,7 +37,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
                return -ENOTSUPP;
        }
 
-       WARN_ON(!key->len);
+       if (!key->len)
+               return -EINVAL;
+
        key->key = kmemdup(buf, key->len, GFP_NOIO);
        if (!key->key) {
                ret = -ENOMEM;
index ad93342c90d72cad59d4797608940b293f2a71fa..8a4d3758030b73d3b9ca24b09f91b1e65be0844e 100644 (file)
@@ -430,6 +430,7 @@ static void ceph_sock_state_change(struct sock *sk)
        switch (sk->sk_state) {
        case TCP_CLOSE:
                dout("%s TCP_CLOSE\n", __func__);
+               /* fall through */
        case TCP_CLOSE_WAIT:
                dout("%s TCP_CLOSE_WAIT\n", __func__);
                con_sock_state_closing(con);
index 9ae1bab8c05db7005d5c345dcb62f6e4b9e145fb..1547107f48544e9690319e4fe9415968448a9f87 100644 (file)
@@ -1279,9 +1279,10 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
 
                /*
                 * Older OSDs don't set reply tid even if the orignal
-                * request had a non-zero tid.  Workaround this weirdness
-                * by falling through to the allocate case.
+                * request had a non-zero tid.  Work around this weirdness
+                * by allocating a new message.
                 */
+               /* fall through */
        case CEPH_MSG_MON_MAP:
        case CEPH_MSG_MDS_MAP:
        case CEPH_MSG_OSD_MAP:
index 8ee29f4f5fa91894e63734cfee3ee6909fd21b26..07ed21d64f92b39da9b683aa432efde6a14afdf0 100644 (file)
@@ -2746,7 +2746,8 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
        if (tx_path)
-               return skb->ip_summed != CHECKSUM_PARTIAL;
+               return skb->ip_summed != CHECKSUM_PARTIAL &&
+                      skb->ip_summed != CHECKSUM_UNNECESSARY;
 
        return skb->ip_summed == CHECKSUM_NONE;
 }
@@ -7139,13 +7140,17 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                    __dev_xdp_attached(dev, bpf_op, NULL))
                        return -EBUSY;
 
-               if (bpf_op == ops->ndo_bpf)
-                       prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
-                                                    dev);
-               else
-                       prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
+               prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
+                                            bpf_op == ops->ndo_bpf);
                if (IS_ERR(prog))
                        return PTR_ERR(prog);
+
+               if (!(flags & XDP_FLAGS_HW_MODE) &&
+                   bpf_prog_is_dev_bound(prog->aux)) {
+                       NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
+                       bpf_prog_put(prog);
+                       return -EINVAL;
+               }
        }
 
        err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
index 70ccda233bd1f1aab18535e6d9d0419bb9a1a23b..c7785efeea577594b8e5ed0a74fb7b8b924ebae2 100644 (file)
@@ -144,9 +144,9 @@ static void send_dm_alert(struct work_struct *work)
  * in the event that more drops will arrive during the
  * hysteresis period.
  */
-static void sched_send_work(unsigned long _data)
+static void sched_send_work(struct timer_list *t)
 {
-       struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
+       struct per_cpu_dm_data *data = from_timer(data, t, send_timer);
 
        schedule_work(&data->dm_alert_work);
 }
@@ -412,8 +412,7 @@ static int __init init_net_drop_monitor(void)
        for_each_possible_cpu(cpu) {
                data = &per_cpu(dm_cpu_data, cpu);
                INIT_WORK(&data->dm_alert_work, send_dm_alert);
-               setup_timer(&data->send_timer, sched_send_work,
-                           (unsigned long)data);
+               timer_setup(&data->send_timer, sched_send_work, 0);
                spin_lock_init(&data->lock);
                reset_per_cpu_data(data);
        }
index 1afa17935954b71ff6ae30d84511a0023afa9cc8..6a85e67fafce224b534dd87bb9407ae115f8ba7a 100644 (file)
@@ -1646,9 +1646,9 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_MEM,
+       .arg1_type      = ARG_PTR_TO_MEM_OR_NULL,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
-       .arg3_type      = ARG_PTR_TO_MEM,
+       .arg3_type      = ARG_PTR_TO_MEM_OR_NULL,
        .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg5_type      = ARG_ANYTHING,
 };
index 7c1ffd6f950172c1915d8e5fa2b5e3f77e4f4c78..9834cfa21b21168a7654290dc2a999e41937b534 100644 (file)
@@ -76,9 +76,9 @@ static void est_fetch_counters(struct net_rate_estimator *e,
 
 }
 
-static void est_timer(unsigned long arg)
+static void est_timer(struct timer_list *t)
 {
-       struct net_rate_estimator *est = (struct net_rate_estimator *)arg;
+       struct net_rate_estimator *est = from_timer(est, t, timer);
        struct gnet_stats_basic_packed b;
        u64 rate, brate;
 
@@ -170,7 +170,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        }
 
        est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
-       setup_timer(&est->timer, est_timer, (unsigned long)est);
+       timer_setup(&est->timer, est_timer, 0);
        mod_timer(&est->timer, est->next_jiffies);
 
        rcu_assign_pointer(*rate_est, est);
index 6ea3a1a7f36a2e2d35ee170756aca0e0d6fc5120..d1f5fe986edda5ff886575be0eea0b361e2be7ff 100644 (file)
@@ -51,7 +51,7 @@ do {                                          \
 
 #define PNEIGH_HASHMASK                0xF
 
-static void neigh_timer_handler(unsigned long arg);
+static void neigh_timer_handler(struct timer_list *t);
 static void __neigh_notify(struct neighbour *n, int type, int flags,
                           u32 pid);
 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
@@ -331,7 +331,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
        n->output         = neigh_blackhole;
        seqlock_init(&n->hh.hh_lock);
        n->parms          = neigh_parms_clone(&tbl->parms);
-       setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
+       timer_setup(&n->timer, neigh_timer_handler, 0);
 
        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl            = tbl;
@@ -903,10 +903,10 @@ static void neigh_probe(struct neighbour *neigh)
 
 /* Called when a timer expires for a neighbour entry. */
 
-static void neigh_timer_handler(unsigned long arg)
+static void neigh_timer_handler(struct timer_list *t)
 {
        unsigned long now, next;
-       struct neighbour *neigh = (struct neighbour *)arg;
+       struct neighbour *neigh = from_timer(neigh, t, timer);
        unsigned int state;
        int notify = 0;
 
@@ -1391,9 +1391,9 @@ int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(neigh_direct_output);
 
-static void neigh_proxy_process(unsigned long arg)
+static void neigh_proxy_process(struct timer_list *t)
 {
-       struct neigh_table *tbl = (struct neigh_table *)arg;
+       struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb, *n;
@@ -1573,7 +1573,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
        INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
        queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
                        tbl->parms.reachable_time);
-       setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
+       timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
        skb_queue_head_init_class(&tbl->proxy_queue,
                        &neigh_table_proxy_queue_class);
 
index b36dceab0dc12000a73e6fec63e28ffa98691f59..324cb9f2f55146a46ef78528fa5ac768e22736b9 100644 (file)
@@ -125,7 +125,7 @@ static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
                                             struct sk_buff *skb,
                                             const void *daddr);
 static int dn_route_input(struct sk_buff *);
-static void dn_run_flush(unsigned long dummy);
+static void dn_run_flush(struct timer_list *unused);
 
 static struct dn_rt_hash_bucket *dn_rt_hash_table;
 static unsigned int dn_rt_hash_mask;
@@ -183,7 +183,7 @@ static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
        return dn_rt_hash_mask & (unsigned int)tmp;
 }
 
-static void dn_dst_check_expire(unsigned long dummy)
+static void dn_dst_check_expire(struct timer_list *unused)
 {
        int i;
        struct dn_route *rt;
@@ -357,7 +357,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
        return 0;
 }
 
-static void dn_run_flush(unsigned long dummy)
+static void dn_run_flush(struct timer_list *unused)
 {
        int i;
        struct dn_route *rt, *next;
@@ -1875,7 +1875,7 @@ void __init dn_route_init(void)
                kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        dst_entries_init(&dn_dst_ops);
-       setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
+       timer_setup(&dn_route_timer, dn_dst_check_expire, 0);
        dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
        add_timer(&dn_route_timer);
 
index f430daed24a0d97a97fe625456fbe8fc7fa22ce3..aa4155875ca84eabb75ab445c4f1adf2612eeac2 100644 (file)
 
 #define SLOW_INTERVAL (HZ/2)
 
-static void dn_slow_timer(unsigned long arg);
+static void dn_slow_timer(struct timer_list *t);
 
 void dn_start_slow_timer(struct sock *sk)
 {
-       setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+       timer_setup(&sk->sk_timer, dn_slow_timer, 0);
        sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 }
 
@@ -47,9 +47,9 @@ void dn_stop_slow_timer(struct sock *sk)
        sk_stop_timer(sk, &sk->sk_timer);
 }
 
-static void dn_slow_timer(unsigned long arg)
+static void dn_slow_timer(struct timer_list *t)
 {
-       struct sock *sk = (struct sock *)arg;
+       struct sock *sk = from_timer(sk, t, sk_timer);
        struct dn_scp *scp = DN_SK(sk);
 
        bh_lock_sock(sk);
index 44e3fb7dec8cfa1b8d3da54590238e2cacc37782..1e287420ff49116ff96f7cac21016d6e2713a0b6 100644 (file)
@@ -51,9 +51,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
        INIT_LIST_HEAD(&dst->list);
        list_add_tail(&dsa_tree_list, &dst->list);
 
-       /* Initialize the reference counter to the number of switches, not 1 */
        kref_init(&dst->refcount);
-       refcount_set(&dst->refcount.refcount, 0);
 
        return dst;
 }
@@ -64,20 +62,23 @@ static void dsa_tree_free(struct dsa_switch_tree *dst)
        kfree(dst);
 }
 
-static struct dsa_switch_tree *dsa_tree_touch(int index)
+static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
 {
-       struct dsa_switch_tree *dst;
-
-       dst = dsa_tree_find(index);
-       if (!dst)
-               dst = dsa_tree_alloc(index);
+       if (dst)
+               kref_get(&dst->refcount);
 
        return dst;
 }
 
-static void dsa_tree_get(struct dsa_switch_tree *dst)
+static struct dsa_switch_tree *dsa_tree_touch(int index)
 {
-       kref_get(&dst->refcount);
+       struct dsa_switch_tree *dst;
+
+       dst = dsa_tree_find(index);
+       if (dst)
+               return dsa_tree_get(dst);
+       else
+               return dsa_tree_alloc(index);
 }
 
 static void dsa_tree_release(struct kref *ref)
@@ -91,7 +92,8 @@ static void dsa_tree_release(struct kref *ref)
 
 static void dsa_tree_put(struct dsa_switch_tree *dst)
 {
-       kref_put(&dst->refcount, dsa_tree_release);
+       if (dst)
+               kref_put(&dst->refcount, dsa_tree_release);
 }
 
 static bool dsa_port_is_dsa(struct dsa_port *port)
@@ -765,6 +767,7 @@ int dsa_register_switch(struct dsa_switch *ds)
 
        mutex_lock(&dsa2_mutex);
        err = dsa_switch_probe(ds);
+       dsa_tree_put(ds->dst);
        mutex_unlock(&dsa2_mutex);
 
        return err;
index ce4aa827be059fe83ec36f3fe1c31f2068a95d8b..f00499a469271fb2165c8ca25fe3b4538e45a01e 100644 (file)
@@ -1223,9 +1223,10 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                 netdev_features_t features)
 {
-       bool fixedid = false, gso_partial, encap;
+       bool udpfrag = false, fixedid = false, gso_partial, encap;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
+       unsigned int offset = 0;
        struct iphdr *iph;
        int proto, tot_len;
        int nhoff;
@@ -1260,6 +1261,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
        if (!skb->encapsulation || encap) {
+               udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
                fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
 
                /* fixed ID is invalid if DF bit is not set */
@@ -1279,7 +1281,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        skb = segs;
        do {
                iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
-               if (skb_is_gso(skb)) {
+               if (udpfrag) {
+                       iph->frag_off = htons(offset >> 3);
+                       if (skb->next)
+                               iph->frag_off |= htons(IP_MF);
+                       offset += skb->len - nhoff - ihl;
+                       tot_len = skb->len - nhoff;
+               } else if (skb_is_gso(skb)) {
                        if (!fixedid) {
                                iph->id = htons(id);
                                id += skb_shinfo(skb)->gso_segs;
index ab183af0b5b6a8f9b7fd02b32b56d32487518f7a..d1f8f302dbf3ed5a079f27efa6eeaf802de40243 100644 (file)
@@ -752,18 +752,18 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        return ip_local_out(net, skb->sk, skb);
 }
 
-static void igmp_gq_timer_expire(unsigned long data)
+static void igmp_gq_timer_expire(struct timer_list *t)
 {
-       struct in_device *in_dev = (struct in_device *)data;
+       struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);
 
        in_dev->mr_gq_running = 0;
        igmpv3_send_report(in_dev, NULL);
        in_dev_put(in_dev);
 }
 
-static void igmp_ifc_timer_expire(unsigned long data)
+static void igmp_ifc_timer_expire(struct timer_list *t)
 {
-       struct in_device *in_dev = (struct in_device *)data;
+       struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
 
        igmpv3_send_cr(in_dev);
        if (in_dev->mr_ifc_count) {
@@ -784,9 +784,9 @@ static void igmp_ifc_event(struct in_device *in_dev)
 }
 
 
-static void igmp_timer_expire(unsigned long data)
+static void igmp_timer_expire(struct timer_list *t)
 {
-       struct ip_mc_list *im = (struct ip_mc_list *)data;
+       struct ip_mc_list *im = from_timer(im, t, timer);
        struct in_device *in_dev = im->interface;
 
        spin_lock(&im->lock);
@@ -1385,7 +1385,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        refcount_set(&im->refcnt, 1);
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
-       setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
+       timer_setup(&im->timer, igmp_timer_expire, 0);
        im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
 #endif
 
@@ -1695,10 +1695,8 @@ void ip_mc_init_dev(struct in_device *in_dev)
        ASSERT_RTNL();
 
 #ifdef CONFIG_IP_MULTICAST
-       setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
-                       (unsigned long)in_dev);
-       setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
-                       (unsigned long)in_dev);
+       timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
+       timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
        in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
 #endif
 
index 40a43ad294cb3751839cc1dbfc02a360e101d401..fd5f19c988e48a00e5447f5504bd670326ef7939 100644 (file)
@@ -112,7 +112,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
 static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
 static void mroute_clean_tables(struct mr_table *mrt, bool all);
-static void ipmr_expire_process(unsigned long arg);
+static void ipmr_expire_process(struct timer_list *t);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
 #define ipmr_for_each_table(mrt, net) \
@@ -375,8 +375,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        INIT_LIST_HEAD(&mrt->mfc_cache_list);
        INIT_LIST_HEAD(&mrt->mfc_unres_queue);
 
-       setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
-                   (unsigned long)mrt);
+       timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
 
        mrt->mroute_reg_vif_num = -1;
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -804,9 +803,9 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 }
 
 /* Timer process for the unresolved queue. */
-static void ipmr_expire_process(unsigned long arg)
+static void ipmr_expire_process(struct timer_list *t)
 {
-       struct mr_table *mrt = (struct mr_table *)arg;
+       struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
        unsigned long now;
        unsigned long expires;
        struct mfc_cache *c, *next;
index 3b427757b1f8ecfee63c0f0667dfa0c38c1653ae..43b69af242e18d640db1bff451d2ee229fc5e08f 100644 (file)
@@ -651,9 +651,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe;
        struct rtable *rt;
+       u32 genid, hval;
        unsigned int i;
        int depth;
-       u32 hval = fnhe_hashfun(daddr);
+
+       genid = fnhe_genid(dev_net(nh->nh_dev));
+       hval = fnhe_hashfun(daddr);
 
        spin_lock_bh(&fnhe_lock);
 
@@ -676,12 +679,13 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
        }
 
        if (fnhe) {
+               if (fnhe->fnhe_genid != genid)
+                       fnhe->fnhe_genid = genid;
                if (gw)
                        fnhe->fnhe_gw = gw;
-               if (pmtu) {
+               if (pmtu)
                        fnhe->fnhe_pmtu = pmtu;
-                       fnhe->fnhe_expires = max(1UL, expires);
-               }
+               fnhe->fnhe_expires = max(1UL, expires);
                /* Update all cached dsts too */
                rt = rcu_dereference(fnhe->fnhe_rth_input);
                if (rt)
@@ -700,7 +704,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                        fnhe->fnhe_next = hash->chain;
                        rcu_assign_pointer(hash->chain, fnhe);
                }
-               fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
+               fnhe->fnhe_genid = genid;
                fnhe->fnhe_daddr = daddr;
                fnhe->fnhe_gw = gw;
                fnhe->fnhe_pmtu = pmtu;
index f844c06c0676c3e23ce9df97f0ccb80f1685127c..734cfc8ff76edf3453921b50620be2986bfcfdb9 100644 (file)
@@ -2964,7 +2964,7 @@ void tcp_rearm_rto(struct sock *sk)
 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
 static void tcp_set_xmit_timer(struct sock *sk)
 {
-       if (!tcp_schedule_loss_probe(sk))
+       if (!tcp_schedule_loss_probe(sk, true))
                tcp_rearm_rto(sk);
 }
 
index 540b7d92cc70b3ea4f91ecb307840166f7f4dbce..a4d214c7b506df70e4eb980a488880243986d836 100644 (file)
@@ -2391,7 +2391,7 @@ repair:
 
                /* Send one loss probe per tail loss episode. */
                if (push_one != 2)
-                       tcp_schedule_loss_probe(sk);
+                       tcp_schedule_loss_probe(sk, false);
                is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
                tcp_cwnd_validate(sk, is_cwnd_limited);
                return false;
@@ -2399,7 +2399,7 @@ repair:
        return !tp->packets_out && !tcp_write_queue_empty(sk);
 }
 
-bool tcp_schedule_loss_probe(struct sock *sk)
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2440,7 +2440,9 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        }
 
        /* If the RTO formula yields an earlier time, then use that time. */
-       rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
+       rto_delta_us = advancing_rto ?
+                       jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
+                       tcp_rto_delta_us(sk);  /* How far in future is RTO? */
        if (rto_delta_us > 0)
                timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
index e360d55be5554d1bee56d3f493752ba9ae2c8015..01801b77bd0da45764fd0e9a80f22b0e46633934 100644 (file)
@@ -187,16 +187,57 @@ out_unlock:
 }
 EXPORT_SYMBOL(skb_udp_tunnel_segment);
 
-static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb,
-                                          netdev_features_t features)
+static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+                                        netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
+       unsigned int mss;
+       __wsum csum;
+       struct udphdr *uh;
+       struct iphdr *iph;
 
        if (skb->encapsulation &&
            (skb_shinfo(skb)->gso_type &
-            (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)))
+            (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
                segs = skb_udp_tunnel_segment(skb, features, false);
+               goto out;
+       }
+
+       if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+               goto out;
+
+       mss = skb_shinfo(skb)->gso_size;
+       if (unlikely(skb->len <= mss))
+               goto out;
+
+       /* Do software UFO. Complete and fill in the UDP checksum as
+        * HW cannot do checksum of UDP packets sent as multiple
+        * IP fragments.
+        */
 
+       uh = udp_hdr(skb);
+       iph = ip_hdr(skb);
+
+       uh->check = 0;
+       csum = skb_checksum(skb, 0, skb->len, 0);
+       uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
+       if (uh->check == 0)
+               uh->check = CSUM_MANGLED_0;
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       /* If there is no outer header we can fake a checksum offload
+        * due to the fact that we have already done the checksum in
+        * software prior to segmenting the frame.
+        */
+       if (!skb->encap_hdr_csum)
+               features |= NETIF_F_HW_CSUM;
+
+       /* Fragment the skb. IP headers of the fragments are updated in
+        * inet_gso_segment()
+        */
+       segs = skb_segment(skb, features);
+out:
        return segs;
 }
 
@@ -330,7 +371,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 
 static const struct net_offload udpv4_offload = {
        .callbacks = {
-               .gso_segment = udp4_tunnel_segment,
+               .gso_segment = udp4_ufo_fragment,
                .gro_receive  = udp4_gro_receive,
                .gro_complete = udp4_gro_complete,
        },
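The udp4_ufo_fragment() hunk above completes the UDP checksum in software before handing the skb to skb_segment(), substituting CSUM_MANGLED_0 (0xffff) when the computed sum is zero, because a zero checksum means "no checksum" for UDP over IPv4. A stand-alone sketch of a 16-bit ones'-complement fold with that substitution; it ignores the pseudo-header and is not the kernel's skb_checksum()/udp_v4_check():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Fold a buffer into a 16-bit ones'-complement checksum. */
static uint16_t csum_fold(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)                          /* odd trailing byte */
                sum += (uint32_t)buf[len - 1] << 8;
        while (sum >> 16)                     /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c };
        uint16_t check = csum_fold(payload, sizeof(payload));

        /* Mirror the CSUM_MANGLED_0 substitution from the hunk above. */
        if (check == 0)
                check = 0xffff;
        printf("udp checksum: 0x%04x\n", check);
        return 0;
}
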
index a0ae1c9d37dfc9712da564a084e6191d56ab48a1..f49bd7897e95f15a381e4700660991f2d3c3fed4 100644 (file)
@@ -188,7 +188,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_work(struct work_struct *w);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
 static void addrconf_dad_run(struct inet6_dev *idev);
-static void addrconf_rs_timer(unsigned long data);
+static void addrconf_rs_timer(struct timer_list *t);
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
 
@@ -388,8 +388,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        rwlock_init(&ndev->lock);
        ndev->dev = dev;
        INIT_LIST_HEAD(&ndev->addr_list);
-       setup_timer(&ndev->rs_timer, addrconf_rs_timer,
-                   (unsigned long)ndev);
+       timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
        memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
 
        if (ndev->cnf.stable_secret.initialized)
@@ -3741,9 +3740,9 @@ restart:
        return 0;
 }
 
-static void addrconf_rs_timer(unsigned long data)
+static void addrconf_rs_timer(struct timer_list *t)
 {
-       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct inet6_dev *idev = from_timer(idev, t, rs_timer);
        struct net_device *dev = idev->dev;
        struct in6_addr lladdr;
 
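Most hunks in this merge convert setup_timer() callbacks taking an unsigned long to the timer_setup()/from_timer() style, where the callback receives a pointer to the embedded struct timer_list and recovers the enclosing object via container_of(). A minimal user-space illustration of that pattern, assuming GNU C typeof as the kernel does; the struct and field names are invented for the example:

/* User-space sketch (not kernel code) of the container_of() trick behind
 * from_timer(); struct and field names are invented for the example.
 */
#include <stddef.h>
#include <stdio.h>

struct timer_list { int pending; };          /* stand-in for the kernel type */

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define from_timer(var, timer_ptr, field) \
        container_of(timer_ptr, typeof(*(var)), field)

struct inet6_dev_demo {
        int id;
        struct timer_list rs_timer;
};

/* New-style callback: gets the timer pointer, derives the owning object. */
static void rs_timer_cb(struct timer_list *t)
{
        struct inet6_dev_demo *idev = from_timer(idev, t, rs_timer);

        printf("timer fired for idev %d\n", idev->id);
}

int main(void)
{
        struct inet6_dev_demo idev = { .id = 42 };

        rs_timer_cb(&idev.rs_timer);         /* simulate the timer expiring */
        return 0;
}
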
index 2e2804f5823e4ee0baeb42e0d7fdfce310397950..f5285f4e1d08acb60d42fb6fd10a0c38a239324f 100644 (file)
@@ -70,7 +70,7 @@ static int fib6_walk_continue(struct fib6_walker *w);
  *     result of redirects, path MTU changes, etc.
  */
 
-static void fib6_gc_timer_cb(unsigned long arg);
+static void fib6_gc_timer_cb(struct timer_list *t);
 
 #define FOR_WALKERS(net, w) \
        list_for_each_entry(w, &(net)->ipv6.fib6_walkers, lh)
@@ -2026,9 +2026,11 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
        spin_unlock_bh(&net->ipv6.fib6_gc_lock);
 }
 
-static void fib6_gc_timer_cb(unsigned long arg)
+static void fib6_gc_timer_cb(struct timer_list *t)
 {
-       fib6_run_gc(0, (struct net *)arg, true);
+       struct net *arg = from_timer(arg, t, ipv6.ip6_fib_timer);
+
+       fib6_run_gc(0, arg, true);
 }
 
 static int __net_init fib6_net_init(struct net *net)
@@ -2043,7 +2045,7 @@ static int __net_init fib6_net_init(struct net *net)
        spin_lock_init(&net->ipv6.fib6_gc_lock);
        rwlock_init(&net->ipv6.fib6_walker_lock);
        INIT_LIST_HEAD(&net->ipv6.fib6_walkers);
-       setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
+       timer_setup(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, 0);
 
        net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
        if (!net->ipv6.rt6_stats)
index 9f2e73c71768d917ff3ca0cb0e22aed28f710ce5..7f59c8fabeeb95e10e8315e7bb3363300469b77e 100644 (file)
@@ -46,7 +46,7 @@
 static atomic_t fl_size = ATOMIC_INIT(0);
 static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
 
-static void ip6_fl_gc(unsigned long dummy);
+static void ip6_fl_gc(struct timer_list *unused);
 static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);
 
 /* FL hash table lock: it protects only of GC */
@@ -127,7 +127,7 @@ static void fl_release(struct ip6_flowlabel *fl)
        spin_unlock_bh(&ip6_fl_lock);
 }
 
-static void ip6_fl_gc(unsigned long dummy)
+static void ip6_fl_gc(struct timer_list *unused)
 {
        int i;
        unsigned long now = jiffies;
index b90bad7a4e56ee59033fae506eceffd6ae50a88f..4cfd8e0696fe77f6d7af7ca3579a2418aef972f6 100644 (file)
@@ -460,7 +460,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
                                      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
                                      tpi->proto);
        if (tunnel) {
-               ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
+               ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
 
                return PACKET_RCVD;
        }
index 9c24b85949c1060011774d9ff743b112206f186d..a2e1a864eb4695ee4323ce2f85f2a560efd73ee4 100644 (file)
@@ -120,7 +120,7 @@ static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
                               struct netlink_callback *cb);
 static void mroute_clean_tables(struct mr6_table *mrt, bool all);
-static void ipmr_expire_process(unsigned long arg);
+static void ipmr_expire_process(struct timer_list *t);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 #define ip6mr_for_each_table(mrt, net) \
@@ -320,8 +320,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 
        INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
 
-       setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
-                   (unsigned long)mrt);
+       timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
 
 #ifdef CONFIG_IPV6_PIMSM_V2
        mrt->mroute_reg_vif_num = -1;
@@ -888,9 +887,9 @@ static void ipmr_do_expire_process(struct mr6_table *mrt)
                mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 }
 
-static void ipmr_expire_process(unsigned long arg)
+static void ipmr_expire_process(struct timer_list *t)
 {
-       struct mr6_table *mrt = (struct mr6_table *)arg;
+       struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
 
        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
index 12b7c27ce5ce917bfb49ec8a56502c02ee02edf6..fc6d7d143f2c29aab9a3f56eae02e5337e65a97b 100644 (file)
@@ -75,10 +75,10 @@ static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
 
 static void igmp6_join_group(struct ifmcaddr6 *ma);
 static void igmp6_leave_group(struct ifmcaddr6 *ma);
-static void igmp6_timer_handler(unsigned long data);
+static void igmp6_timer_handler(struct timer_list *t);
 
-static void mld_gq_timer_expire(unsigned long data);
-static void mld_ifc_timer_expire(unsigned long data);
+static void mld_gq_timer_expire(struct timer_list *t);
+static void mld_ifc_timer_expire(struct timer_list *t);
 static void mld_ifc_event(struct inet6_dev *idev);
 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
@@ -839,7 +839,7 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
        if (!mc)
                return NULL;
 
-       setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
+       timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);
 
        mc->mca_addr = *addr;
        mc->idev = idev; /* reference taken by caller */
@@ -2083,9 +2083,9 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
        }
 }
 
-static void mld_dad_timer_expire(unsigned long data)
+static void mld_dad_timer_expire(struct timer_list *t)
 {
-       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
 
        mld_send_initial_cr(idev);
        if (idev->mc_dad_count) {
@@ -2432,18 +2432,18 @@ static void igmp6_leave_group(struct ifmcaddr6 *ma)
        }
 }
 
-static void mld_gq_timer_expire(unsigned long data)
+static void mld_gq_timer_expire(struct timer_list *t)
 {
-       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
 
        idev->mc_gq_running = 0;
        mld_send_report(idev, NULL);
        in6_dev_put(idev);
 }
 
-static void mld_ifc_timer_expire(unsigned long data)
+static void mld_ifc_timer_expire(struct timer_list *t)
 {
-       struct inet6_dev *idev = (struct inet6_dev *)data;
+       struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
 
        mld_send_cr(idev);
        if (idev->mc_ifc_count) {
@@ -2462,9 +2462,9 @@ static void mld_ifc_event(struct inet6_dev *idev)
        mld_ifc_start_timer(idev, 1);
 }
 
-static void igmp6_timer_handler(unsigned long data)
+static void igmp6_timer_handler(struct timer_list *t)
 {
-       struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
+       struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
 
        if (mld_in_v1_mode(ma->idev))
                igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
@@ -2552,14 +2552,11 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
        write_lock_bh(&idev->lock);
        spin_lock_init(&idev->mc_lock);
        idev->mc_gq_running = 0;
-       setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
-                       (unsigned long)idev);
+       timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
        idev->mc_tomb = NULL;
        idev->mc_ifc_count = 0;
-       setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
-                       (unsigned long)idev);
-       setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
-                   (unsigned long)idev);
+       timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
+       timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
        ipv6_mc_reset(idev);
        write_unlock_bh(&idev->lock);
 }
index 4a7e5ffa51083112fa3927cfe0c5f7d36cd60235..4fe7c90962ddae3356200376aa911bab6d75bb48 100644 (file)
@@ -31,6 +31,37 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
        return id;
 }
 
+/* This function exists only for tap drivers that must support broken
+ * clients requesting UFO without specifying an IPv6 fragment ID.
+ *
+ * This is similar to ipv6_select_ident() but we use an independent hash
+ * seed to limit information leakage.
+ *
+ * The network header must be set before calling this.
+ */
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
+{
+       static u32 ip6_proxy_idents_hashrnd __read_mostly;
+       struct in6_addr buf[2];
+       struct in6_addr *addrs;
+       u32 id;
+
+       addrs = skb_header_pointer(skb,
+                                  skb_network_offset(skb) +
+                                  offsetof(struct ipv6hdr, saddr),
+                                  sizeof(buf), buf);
+       if (!addrs)
+               return 0;
+
+       net_get_random_once(&ip6_proxy_idents_hashrnd,
+                           sizeof(ip6_proxy_idents_hashrnd));
+
+       id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
+                                &addrs[1], &addrs[0]);
+       return htonl(id);
+}
+EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+
 __be32 ipv6_select_ident(struct net *net,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr)
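ipv6_proxy_select_ident() above copies the source and destination addresses out of the packet with skb_header_pointer(), lazily seeds a dedicated hash secret via net_get_random_once(), and hashes the pair into a fragment ID. A rough user-space sketch of the same flow; the FNV-1a hash and the static-flag seeding are only stand-ins for __ipv6_select_ident() and net_get_random_once(), not the kernel's implementation:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct in6_addr_demo { uint8_t s6_addr[16]; };

/* Throwaway stand-in hash; the kernel uses its own keyed hash, not FNV. */
static uint32_t fnv1a(const void *data, size_t len, uint32_t seed)
{
        const uint8_t *p = data;
        uint32_t h = 2166136261u ^ seed;
        size_t i;

        for (i = 0; i < len; i++) {
                h ^= p[i];
                h *= 16777619u;
        }
        return h;
}

static uint32_t proxy_select_ident(const struct in6_addr_demo *saddr,
                                   const struct in6_addr_demo *daddr)
{
        static uint32_t hashrnd;
        static int seeded;

        if (!seeded) {               /* simplified net_get_random_once() */
                hashrnd = (uint32_t)rand();
                seeded = 1;
        }
        /* Hash both addresses with the seed; ordering follows the call above. */
        return fnv1a(daddr, sizeof(*daddr),
                     fnv1a(saddr, sizeof(*saddr), hashrnd));
}

int main(void)
{
        struct in6_addr_demo s, d;

        memset(&s, 0x20, sizeof(s));
        memset(&d, 0x21, sizeof(d));
        printf("frag id: 0x%08x\n", proxy_select_ident(&s, &d));
        return 0;
}
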
index 05eb7bc36156a3e571f728ad33d61b1a41a81a1c..7a8d1500d374b4089e623ed2b20d68110cff498e 100644 (file)
@@ -472,6 +472,11 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
                                &match->rt6i_siblings, rt6i_siblings) {
                        route_choosen--;
                        if (route_choosen == 0) {
+                               struct inet6_dev *idev = sibling->rt6i_idev;
+
+                               if (!netif_carrier_ok(sibling->dst.dev) &&
+                                   idev->cnf.ignore_routes_with_linkdown)
+                                       break;
                                if (rt6_score_route(sibling, oif, strict) < 0)
                                        break;
                                match = sibling;
@@ -1019,7 +1024,7 @@ static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
 {
        struct net_device *dev = rt->dst.dev;
 
-       if (rt->rt6i_flags & RTF_LOCAL) {
+       if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
                /* for copies of local routes, dst->dev needs to be the
                 * device if it is a master device, the master device if
                 * device is enslaved, and the loopback as the default
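The rt6_multipath_select() hunk above makes ECMP sibling selection keep the current match when the selected sibling's device has no carrier and ignore_routes_with_linkdown is set for it. A toy selection loop with the same shape; the struct and helpers are hypothetical, not the kernel's rt6_info handling:

#include <stdio.h>
#include <stdbool.h>

struct route_demo {
        const char *name;
        bool carrier_ok;             /* netif_carrier_ok() stand-in */
        bool ignore_linkdown;        /* per-device sysctl stand-in */
        int score;                   /* rt6_score_route() stand-in */
};

/* Walk to the route_choosen-th sibling; keep the previous match if the
 * candidate is link-down (and configured to be ignored) or scores negatively.
 */
static const struct route_demo *multipath_select(const struct route_demo *match,
                                                 const struct route_demo *siblings,
                                                 int nsiblings, int route_choosen)
{
        int i;

        for (i = 0; i < nsiblings; i++) {
                if (--route_choosen)
                        continue;
                if (!siblings[i].carrier_ok && siblings[i].ignore_linkdown)
                        break;
                if (siblings[i].score < 0)
                        break;
                match = &siblings[i];
                break;
        }
        return match;
}

int main(void)
{
        struct route_demo first = { "first", true, true, 0 };
        struct route_demo sib[] = {
                { "sibling0", false, true, 0 },  /* link down: falls back */
                { "sibling1", true,  true, 0 },
        };

        printf("choosen=1 -> %s\n", multipath_select(&first, sib, 2, 1)->name);
        printf("choosen=2 -> %s\n", multipath_select(&first, sib, 2, 2)->name);
        return 0;
}
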
index 455fd4e39333233289e9a844de512f200119ff1a..a0f89ad76f9d2233b9e048418069aacd92ac6a25 100644 (file)
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
-static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb,
-                                          netdev_features_t features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+                                        netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
+       unsigned int mss;
+       unsigned int unfrag_ip6hlen, unfrag_len;
+       struct frag_hdr *fptr;
+       u8 *packet_start, *prevhdr;
+       u8 nexthdr;
+       u8 frag_hdr_sz = sizeof(struct frag_hdr);
+       __wsum csum;
+       int tnl_hlen;
+       int err;
+
+       mss = skb_shinfo(skb)->gso_size;
+       if (unlikely(skb->len <= mss))
+               goto out;
 
        if (skb->encapsulation && skb_shinfo(skb)->gso_type &
            (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
                segs = skb_udp_tunnel_segment(skb, features, true);
+       else {
+               const struct ipv6hdr *ipv6h;
+               struct udphdr *uh;
+
+               if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+                       goto out;
+
+               /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
+                * do checksum of UDP packets sent as multiple IP fragments.
+                */
+
+               uh = udp_hdr(skb);
+               ipv6h = ipv6_hdr(skb);
+
+               uh->check = 0;
+               csum = skb_checksum(skb, 0, skb->len, 0);
+               uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
+                                         &ipv6h->daddr, csum);
+               if (uh->check == 0)
+                       uh->check = CSUM_MANGLED_0;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               /* If there is no outer header we can fake a checksum offload
+                * due to the fact that we have already done the checksum in
+                * software prior to segmenting the frame.
+                */
+               if (!skb->encap_hdr_csum)
+                       features |= NETIF_F_HW_CSUM;
+
+               /* Check if there is enough headroom to insert fragment header. */
+               tnl_hlen = skb_tnl_header_len(skb);
+               if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
+                       if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+                               goto out;
+               }
+
+               /* Find the unfragmentable header and shift it left by frag_hdr_sz
+                * bytes to insert fragment header.
+                */
+               err = ip6_find_1stfragopt(skb, &prevhdr);
+               if (err < 0)
+                       return ERR_PTR(err);
+               unfrag_ip6hlen = err;
+               nexthdr = *prevhdr;
+               *prevhdr = NEXTHDR_FRAGMENT;
+               unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+                            unfrag_ip6hlen + tnl_hlen;
+               packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+               memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
+
+               SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
+               skb->mac_header -= frag_hdr_sz;
+               skb->network_header -= frag_hdr_sz;
+
+               fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+               fptr->nexthdr = nexthdr;
+               fptr->reserved = 0;
+               fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb);
+
+               /* Fragment the skb. ipv6 header and the remaining fields of the
+                * fragment header are updated in ipv6_gso_segment()
+                */
+               segs = skb_segment(skb, features);
+       }
 
+out:
        return segs;
 }
 
@@ -75,7 +154,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
 
 static const struct net_offload udpv6_offload = {
        .callbacks = {
-               .gso_segment    =       udp6_tunnel_segment,
+               .gso_segment    =       udp6_ufo_fragment,
                .gro_receive    =       udp6_gro_receive,
                .gro_complete   =       udp6_gro_complete,
        },
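The IPv6 UFO path above makes room for the 8-byte fragment header by shifting the unfragmentable part (MAC plus IPv6 plus any tunnel headers) left by sizeof(struct frag_hdr) and writing the fragment header into the gap. A byte-buffer sketch of that memmove and header write; the layout, sizes and values are invented, and headroom expansion, prevhdr patching and skb offset bookkeeping are all omitted:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct frag_hdr_demo {
        uint8_t  nexthdr;
        uint8_t  reserved;
        uint16_t frag_off;
        uint32_t identification;
};

int main(void)
{
        /* 8 bytes of headroom, then a 14-byte "MAC" + 40-byte "IPv6" header. */
        uint8_t buf[8 + 14 + 40 + 16];
        size_t headroom = 8, unfrag_len = 14 + 40;
        uint8_t *packet_start = buf + headroom;
        struct frag_hdr_demo fh = {
                .nexthdr = 17,                 /* NEXTHDR_UDP */
                .identification = 0xdeadbeef,  /* would come from the ID helper */
        };

        memset(buf, 0xab, sizeof(buf));

        /* Shift the unfragmentable part left by sizeof(frag_hdr)... */
        memmove(packet_start - sizeof(fh), packet_start, unfrag_len);
        /* ...and drop the fragment header into the gap that opened up. */
        memcpy(packet_start - sizeof(fh) + unfrag_len, &fh, sizeof(fh));

        printf("fragment header now at offset %zu\n",
               (size_t)(packet_start - sizeof(fh) + unfrag_len - buf));
        return 0;
}
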
index 8bb469cb3abeb239a9b89e997dd1104a38c455aa..5d4ae01951b562c602fed13787719edfaad3c635 100644 (file)
@@ -42,7 +42,7 @@ void lapb_start_t1timer(struct lapb_cb *lapb)
 {
        del_timer(&lapb->t1timer);
 
-       lapb->t1timer.function = (TIMER_FUNC_TYPE)lapb_t1timer_expiry;
+       lapb->t1timer.function = lapb_t1timer_expiry;
        lapb->t1timer.expires  = jiffies + lapb->t1;
 
        add_timer(&lapb->t1timer);
@@ -52,7 +52,7 @@ void lapb_start_t2timer(struct lapb_cb *lapb)
 {
        del_timer(&lapb->t2timer);
 
-       lapb->t2timer.function = (TIMER_FUNC_TYPE)lapb_t2timer_expiry;
+       lapb->t2timer.function = lapb_t2timer_expiry;
        lapb->t2timer.expires  = jiffies + lapb->t2;
 
        add_timer(&lapb->t2timer);
index 88cc1ae935ead5f1a2b4a46d2d3af3fbf4cefd8d..d444752dbf40789cfb0d93d3d572b6bd04263671 100644 (file)
@@ -151,21 +151,17 @@ EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
  * After accepting the AddBA Request we activated a timer,
  * resetting it after each frame that arrives from the originator.
  */
-static void sta_rx_agg_session_timer_expired(unsigned long data)
+static void sta_rx_agg_session_timer_expired(struct timer_list *t)
 {
-       /* not an elegant detour, but there is no choice as the timer passes
-        * only one argument, and various sta_info are needed here, so init
-        * flow in sta_info_create gives the TID as data, while the timer_to_id
-        * array gives the sta through container_of */
-       u8 *ptid = (u8 *)data;
-       u8 *timer_to_id = ptid - *ptid;
-       struct sta_info *sta = container_of(timer_to_id, struct sta_info,
-                                        timer_to_tid[0]);
+       struct tid_ampdu_rx *tid_rx_timer =
+               from_timer(tid_rx_timer, t, session_timer);
+       struct sta_info *sta = tid_rx_timer->sta;
+       u8 tid = tid_rx_timer->tid;
        struct tid_ampdu_rx *tid_rx;
        unsigned long timeout;
 
        rcu_read_lock();
-       tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
+       tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
        if (!tid_rx) {
                rcu_read_unlock();
                return;
@@ -180,21 +176,18 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
        rcu_read_unlock();
 
        ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
-              sta->sta.addr, (u16)*ptid);
+              sta->sta.addr, tid);
 
-       set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
+       set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
        ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
 }
 
-static void sta_rx_agg_reorder_timer_expired(unsigned long data)
+static void sta_rx_agg_reorder_timer_expired(struct timer_list *t)
 {
-       u8 *ptid = (u8 *)data;
-       u8 *timer_to_id = ptid - *ptid;
-       struct sta_info *sta = container_of(timer_to_id, struct sta_info,
-                       timer_to_tid[0]);
+       struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, reorder_timer);
 
        rcu_read_lock();
-       ieee80211_release_reorder_timeout(sta, *ptid);
+       ieee80211_release_reorder_timeout(tid_rx->sta, tid_rx->tid);
        rcu_read_unlock();
 }
 
@@ -356,14 +349,12 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
        spin_lock_init(&tid_agg_rx->reorder_lock);
 
        /* rx timer */
-       setup_deferrable_timer(&tid_agg_rx->session_timer,
-                              sta_rx_agg_session_timer_expired,
-                              (unsigned long)&sta->timer_to_tid[tid]);
+       timer_setup(&tid_agg_rx->session_timer,
+                   sta_rx_agg_session_timer_expired, TIMER_DEFERRABLE);
 
        /* rx reorder timer */
-       setup_timer(&tid_agg_rx->reorder_timer,
-                   sta_rx_agg_reorder_timer_expired,
-                   (unsigned long)&sta->timer_to_tid[tid]);
+       timer_setup(&tid_agg_rx->reorder_timer,
+                   sta_rx_agg_reorder_timer_expired, 0);
 
        /* prepare reordering buffer */
        tid_agg_rx->reorder_buf =
@@ -399,6 +390,8 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
        tid_agg_rx->auto_seq = auto_seq;
        tid_agg_rx->started = false;
        tid_agg_rx->reorder_buf_filtered = 0;
+       tid_agg_rx->tid = tid;
+       tid_agg_rx->sta = sta;
        status = WLAN_STATUS_SUCCESS;
 
        /* activate it for RX */
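The replaced callbacks recovered the sta_info through a pointer trick: the timer data was &sta->timer_to_tid[tid], an identity-mapped byte array, so ptid - *ptid landed on timer_to_tid[0] and container_of() yielded the sta. The hunks above retire that by storing sta and tid directly in struct tid_ampdu_rx/tid_ampdu_tx and using from_timer(). A compressed user-space comparison of the two lookups, with invented demo struct names:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define NUM_TIDS 16

struct sta_demo {
        const char *name;
        uint8_t timer_to_tid[NUM_TIDS];       /* old scheme: identity map */
};

struct tid_ampdu_rx_demo {                    /* new scheme: back-pointers */
        struct sta_demo *sta;
        uint8_t tid;
};

int main(void)
{
        struct sta_demo sta = { .name = "sta0" };
        int i;

        for (i = 0; i < NUM_TIDS; i++)
                sta.timer_to_tid[i] = i;

        /* Old lookup: timer data was &sta->timer_to_tid[tid]. */
        uint8_t *ptid = &sta.timer_to_tid[5];
        uint8_t *timer_to_id = ptid - *ptid;
        struct sta_demo *old = container_of(timer_to_id, struct sta_demo,
                                            timer_to_tid[0]);

        /* New lookup: the aggregation struct carries sta and tid itself. */
        struct tid_ampdu_rx_demo tid_rx = { .sta = &sta, .tid = 5 };

        printf("old: %s tid %u, new: %s tid %u\n",
               old->name, (unsigned)*ptid,
               tid_rx.sta->name, (unsigned)tid_rx.tid);
        return 0;
}
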
index bef516ec47f94c19f57da37d80c744bb534deeb4..5f8ab5be369fe9705744473b5dc5928a643d50d1 100644 (file)
@@ -330,6 +330,11 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 
        spin_lock_bh(&sta->lock);
 
+       /* free struct pending for start, if present */
+       tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
+       kfree(tid_tx);
+       sta->ampdu_mlme.tid_start_tx[tid] = NULL;
+
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
        if (!tid_tx) {
                spin_unlock_bh(&sta->lock);
@@ -422,15 +427,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
  * add Block Ack response will arrive from the recipient.
  * If this timer expires sta_addba_resp_timer_expired will be executed.
  */
-static void sta_addba_resp_timer_expired(unsigned long data)
+static void sta_addba_resp_timer_expired(struct timer_list *t)
 {
-       /* not an elegant detour, but there is no choice as the timer passes
-        * only one argument, and both sta_info and TID are needed, so init
-        * flow in sta_info_create gives the TID as data, while the timer_to_id
-        * array gives the sta through container_of */
-       u16 tid = *(u8 *)data;
-       struct sta_info *sta = container_of((void *)data,
-               struct sta_info, timer_to_tid[tid]);
+       struct tid_ampdu_tx *tid_tx_timer =
+               from_timer(tid_tx_timer, t, addba_resp_timer);
+       struct sta_info *sta = tid_tx_timer->sta;
+       u8 tid = tid_tx_timer->tid;
        struct tid_ampdu_tx *tid_tx;
 
        /* check if the TID waits for addBA response */
@@ -525,21 +527,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
  * After accepting the AddBA Response we activated a timer,
  * resetting it after each frame that we send.
  */
-static void sta_tx_agg_session_timer_expired(unsigned long data)
+static void sta_tx_agg_session_timer_expired(struct timer_list *t)
 {
-       /* not an elegant detour, but there is no choice as the timer passes
-        * only one argument, and various sta_info are needed here, so init
-        * flow in sta_info_create gives the TID as data, while the timer_to_id
-        * array gives the sta through container_of */
-       u8 *ptid = (u8 *)data;
-       u8 *timer_to_id = ptid - *ptid;
-       struct sta_info *sta = container_of(timer_to_id, struct sta_info,
-                                        timer_to_tid[0]);
+       struct tid_ampdu_tx *tid_tx_timer =
+               from_timer(tid_tx_timer, t, session_timer);
+       struct sta_info *sta = tid_tx_timer->sta;
+       u8 tid = tid_tx_timer->tid;
        struct tid_ampdu_tx *tid_tx;
        unsigned long timeout;
 
        rcu_read_lock();
-       tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
+       tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
        if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
                rcu_read_unlock();
                return;
@@ -555,9 +553,9 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
        rcu_read_unlock();
 
        ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
-              sta->sta.addr, (u16)*ptid);
+              sta->sta.addr, tid);
 
-       ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
+       ieee80211_stop_tx_ba_session(&sta->sta, tid);
 }
 
 int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
@@ -670,16 +668,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
        __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
 
        tid_tx->timeout = timeout;
+       tid_tx->sta = sta;
+       tid_tx->tid = tid;
 
        /* response timer */
-       setup_timer(&tid_tx->addba_resp_timer,
-                   sta_addba_resp_timer_expired,
-                   (unsigned long)&sta->timer_to_tid[tid]);
+       timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);
 
        /* tx timer */
-       setup_deferrable_timer(&tid_tx->session_timer,
-                              sta_tx_agg_session_timer_expired,
-                              (unsigned long)&sta->timer_to_tid[tid]);
+       timer_setup(&tid_tx->session_timer,
+                   sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);
 
        /* assign a dialog token */
        sta->ampdu_mlme.dialog_token_allocator++;
index 41f5e48f802197218e79c976ea8f99d967cff91f..167f83b853e6bd391256e15ef99439b792e18cdc 100644 (file)
@@ -292,7 +292,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 
        mutex_lock(&sta->ampdu_mlme.mtx);
        for (i = 0; i <  IEEE80211_NUM_TIDS; i++) {
-               ___ieee80211_stop_tx_ba_session(sta, i, reason);
                ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
                                                WLAN_REASON_QSTA_LEAVE_QBSS,
                                                reason != AGG_STOP_DESTROY_STA &&
@@ -300,6 +299,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
        }
        mutex_unlock(&sta->ampdu_mlme.mtx);
 
+       for (i = 0; i <  IEEE80211_NUM_TIDS; i++)
+               ___ieee80211_stop_tx_ba_session(sta, i, reason);
+
        /* stopping might queue the work again - so cancel only afterwards */
        cancel_work_sync(&sta->ampdu_mlme.work);
 
index e9c6aa3ed05b8ddb8cf03decce82c00e59b4c0a7..db07e0de9a0374229857f5a2fb4e081928ea936f 100644 (file)
@@ -1711,10 +1711,10 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
        sdata_unlock(sdata);
 }
 
-static void ieee80211_ibss_timer(unsigned long data)
+static void ieee80211_ibss_timer(struct timer_list *t)
 {
        struct ieee80211_sub_if_data *sdata =
-               (struct ieee80211_sub_if_data *) data;
+               from_timer(sdata, t, u.ibss.timer);
 
        ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 }
@@ -1723,8 +1723,7 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-       setup_timer(&ifibss->timer, ieee80211_ibss_timer,
-                   (unsigned long) sdata);
+       timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0);
        INIT_LIST_HEAD(&ifibss->incomplete_stations);
        spin_lock_init(&ifibss->incomplete_lock);
        INIT_WORK(&ifibss->csa_connection_drop_work,
index 68f874e73561e8fe4d1a2ba379b26205e9c2cdb4..885d00b419119a2bef3fd0b7c3f00f92c2a968ad 100644 (file)
@@ -1057,6 +1057,7 @@ struct tpt_led_trigger {
        const struct ieee80211_tpt_blink *blink_table;
        unsigned int blink_table_len;
        struct timer_list timer;
+       struct ieee80211_local *local;
        unsigned long prev_traffic;
        unsigned long tx_bytes, rx_bytes;
        unsigned int active, want;
@@ -1932,7 +1933,7 @@ static inline int ieee80211_ac_from_tid(int tid)
 
 void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
 void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
-void ieee80211_dynamic_ps_timer(unsigned long data);
+void ieee80211_dynamic_ps_timer(struct timer_list *t);
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
                             struct ieee80211_sub_if_data *sdata,
                             bool powersave);
index 0505845b7ab836c15888a06b3b7d76995d46e685..ba0b507ea6910f2d645f1fecfcb5d09877e2340c 100644 (file)
@@ -248,10 +248,10 @@ static unsigned long tpt_trig_traffic(struct ieee80211_local *local,
        return DIV_ROUND_UP(delta, 1024 / 8);
 }
 
-static void tpt_trig_timer(unsigned long data)
+static void tpt_trig_timer(struct timer_list *t)
 {
-       struct ieee80211_local *local = (void *)data;
-       struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
+       struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer);
+       struct ieee80211_local *local = tpt_trig->local;
        struct led_classdev *led_cdev;
        unsigned long on, off, tpt;
        int i;
@@ -306,8 +306,9 @@ __ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
        tpt_trig->blink_table = blink_table;
        tpt_trig->blink_table_len = blink_table_len;
        tpt_trig->want = flags;
+       tpt_trig->local = local;
 
-       setup_timer(&tpt_trig->timer, tpt_trig_timer, (unsigned long)local);
+       timer_setup(&tpt_trig->timer, tpt_trig_timer, 0);
 
        local->tpt_led_trigger = tpt_trig;
 
@@ -326,7 +327,7 @@ static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local)
        tpt_trig_traffic(local, tpt_trig);
        tpt_trig->running = true;
 
-       tpt_trig_timer((unsigned long)local);
+       tpt_trig_timer(&tpt_trig->timer);
        mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ));
 }
 
index 8aa1f5b6a05145b0838b494abdf2b2c5b0aa17ab..e054a2fd8d38bd00c0e181ca01180b19c3ffb132 100644 (file)
@@ -633,8 +633,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
                  ieee80211_dynamic_ps_enable_work);
        INIT_WORK(&local->dynamic_ps_disable_work,
                  ieee80211_dynamic_ps_disable_work);
-       setup_timer(&local->dynamic_ps_timer,
-                   ieee80211_dynamic_ps_timer, (unsigned long) local);
+       timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
 
        INIT_WORK(&local->sched_scan_stopped_work,
                  ieee80211_sched_scan_stopped_work);
index 7a76c4a6df306574f8189f132c148295ac14119e..5e27364e10acf3420e8f192b09488ab4bc252d09 100644 (file)
@@ -37,9 +37,10 @@ void ieee80211s_stop(void)
        kmem_cache_destroy(rm_cache);
 }
 
-static void ieee80211_mesh_housekeeping_timer(unsigned long data)
+static void ieee80211_mesh_housekeeping_timer(struct timer_list *t)
 {
-       struct ieee80211_sub_if_data *sdata = (void *) data;
+       struct ieee80211_sub_if_data *sdata =
+               from_timer(sdata, t, u.mesh.housekeeping_timer);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
@@ -528,18 +529,18 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
        return 0;
 }
 
-static void ieee80211_mesh_path_timer(unsigned long data)
+static void ieee80211_mesh_path_timer(struct timer_list *t)
 {
        struct ieee80211_sub_if_data *sdata =
-               (struct ieee80211_sub_if_data *) data;
+               from_timer(sdata, t, u.mesh.mesh_path_timer);
 
        ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 }
 
-static void ieee80211_mesh_path_root_timer(unsigned long data)
+static void ieee80211_mesh_path_root_timer(struct timer_list *t)
 {
        struct ieee80211_sub_if_data *sdata =
-               (struct ieee80211_sub_if_data *) data;
+               from_timer(sdata, t, u.mesh.mesh_path_root_timer);
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
        set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
@@ -1442,9 +1443,8 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        static u8 zero_addr[ETH_ALEN] = {};
 
-       setup_timer(&ifmsh->housekeeping_timer,
-                   ieee80211_mesh_housekeeping_timer,
-                   (unsigned long) sdata);
+       timer_setup(&ifmsh->housekeeping_timer,
+                   ieee80211_mesh_housekeeping_timer, 0);
 
        ifmsh->accepting_plinks = true;
        atomic_set(&ifmsh->mpaths, 0);
@@ -1458,12 +1458,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
 
        mesh_pathtbl_init(sdata);
 
-       setup_timer(&ifmsh->mesh_path_timer,
-                   ieee80211_mesh_path_timer,
-                   (unsigned long) sdata);
-       setup_timer(&ifmsh->mesh_path_root_timer,
-                   ieee80211_mesh_path_root_timer,
-                   (unsigned long) sdata);
+       timer_setup(&ifmsh->mesh_path_timer, ieee80211_mesh_path_timer, 0);
+       timer_setup(&ifmsh->mesh_path_root_timer,
+                   ieee80211_mesh_path_root_timer, 0);
        INIT_LIST_HEAD(&ifmsh->preq_queue.list);
        skb_queue_head_init(&ifmsh->ps.bc_buf);
        spin_lock_init(&ifmsh->mesh_preq_queue_lock);
index 465b7853edc0b1c85a2145f2b248c81f86fb92eb..ee56f18cad3f7e89e1c60fe4829dab7bfa1ef340 100644 (file)
@@ -296,7 +296,7 @@ void mesh_path_tx_pending(struct mesh_path *mpath);
 int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
 void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
 int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
-void mesh_path_timer(unsigned long data);
+void mesh_path_timer(struct timer_list *t);
 void mesh_path_flush_by_nexthop(struct sta_info *sta);
 void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
                             struct sk_buff *skb);
index 146ec6c0f12f86f1de27db23e51326b05bf3e643..4394463a0c2e6a4b8623c80da2c10729996a2b8d 100644 (file)
@@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
        struct mesh_path *mpath;
        u8 ttl, flags, hopcount;
        const u8 *orig_addr;
-       u32 orig_sn, metric, metric_txsta, interval;
+       u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
        bool root_is_gate;
 
        ttl = rann->rann_ttl;
@@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
        interval = le32_to_cpu(rann->rann_interval);
        hopcount = rann->rann_hopcount;
        hopcount++;
-       metric = le32_to_cpu(rann->rann_metric);
+       orig_metric = le32_to_cpu(rann->rann_metric);
 
        /*  Ignore our own RANNs */
        if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
                return;
        }
 
-       metric_txsta = airtime_link_metric_get(local, sta);
+       last_hop_metric = airtime_link_metric_get(local, sta);
+       new_metric = orig_metric + last_hop_metric;
+       if (new_metric < orig_metric)
+               new_metric = MAX_METRIC;
 
        mpath = mesh_path_lookup(sdata, orig_addr);
        if (!mpath) {
@@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
        }
 
        if (!(SN_LT(mpath->sn, orig_sn)) &&
-           !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+           !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
                rcu_read_unlock();
                return;
        }
@@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
        }
 
        mpath->sn = orig_sn;
-       mpath->rann_metric = metric + metric_txsta;
+       mpath->rann_metric = new_metric;
        mpath->is_root = true;
        /* Recording RANNs sender address to send individually
         * addressed PREQs destined for root mesh STA */
@@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
                mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
                                       orig_sn, 0, NULL, 0, broadcast_addr,
                                       hopcount, ttl, interval,
-                                      metric + metric_txsta, 0, sdata);
+                                      new_metric, 0, sdata);
        }
 
        rcu_read_unlock();
@@ -1194,9 +1197,9 @@ endlookup:
        return err;
 }
 
-void mesh_path_timer(unsigned long data)
+void mesh_path_timer(struct timer_list *t)
 {
-       struct mesh_path *mpath = (void *) data;
+       struct mesh_path *mpath = from_timer(mpath, t, timer);
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        int ret;
 
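The hwmp_rann_frame_process() change earlier in this file accumulates the last-hop airtime metric into the RANN metric and clamps on 32-bit wraparound (new_metric < orig_metric), so a propagated root announcement can no longer end up advertising an artificially small metric after overflow. A minimal sketch of that saturating add; MAX_METRIC_DEMO stands in for the kernel's MAX_METRIC constant:

#include <stdio.h>
#include <stdint.h>

#define MAX_METRIC_DEMO 0xffffffffu   /* stand-in for the kernel's MAX_METRIC */

static uint32_t rann_metric_add(uint32_t orig_metric, uint32_t last_hop_metric)
{
        uint32_t new_metric = orig_metric + last_hop_metric;

        /* Unsigned wraparound means the sum exceeded 32 bits: saturate. */
        if (new_metric < orig_metric)
                new_metric = MAX_METRIC_DEMO;
        return new_metric;
}

int main(void)
{
        printf("%u\n", rann_metric_add(1000, 200));            /* 1200 */
        printf("%u\n", rann_metric_add(0xfffffff0u, 0x100u));  /* saturates */
        return 0;
}
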
index 97269caafecd7b52e644e9bb645d305fdfb67196..86c8dfef56a4c8f021b68aa723ced9447b5b9602 100644 (file)
@@ -399,8 +399,7 @@ struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
        skb_queue_head_init(&new_mpath->frame_queue);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
-       setup_timer(&new_mpath->timer, mesh_path_timer,
-                   (unsigned long) new_mpath);
+       timer_setup(&new_mpath->timer, mesh_path_timer, 0);
 
        return new_mpath;
 }
index e4ededa1909d86590bb1f96116f61951acefcd6c..c244691deab9c6dcae91305e429092dd6e23f1e2 100644 (file)
@@ -895,7 +895,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
        struct ieee80211_hdr_3addr *nullfunc;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-       skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
+       skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
        if (!skb)
                return;
 
@@ -1066,10 +1066,10 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
 }
 EXPORT_SYMBOL(ieee80211_chswitch_done);
 
-static void ieee80211_chswitch_timer(unsigned long data)
+static void ieee80211_chswitch_timer(struct timer_list *t)
 {
        struct ieee80211_sub_if_data *sdata =
-               (struct ieee80211_sub_if_data *) data;
+               from_timer(sdata, t, u.mgd.chswitch_timer);
 
        ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
 }
@@ -1577,9 +1577,9 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
        }
 }
 
-void ieee80211_dynamic_ps_timer(unsigned long data)
+void ieee80211_dynamic_ps_timer(struct timer_list *t)
 {
-       struct ieee80211_local *local = (void *) data;
+       struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer);
 
        ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
 }
@@ -3711,10 +3711,10 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        sdata_unlock(sdata);
 }
 
-static void ieee80211_sta_timer(unsigned long data)
+static void ieee80211_sta_timer(struct timer_list *t)
 {
        struct ieee80211_sub_if_data *sdata =
-               (struct ieee80211_sub_if_data *) data;
+               from_timer(sdata, t, u.mgd.timer);
 
        ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 }
@@ -3991,10 +3991,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
        sdata_unlock(sdata);
 }
 
-static void ieee80211_sta_bcn_mon_timer(unsigned long data)
+static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
 {
        struct ieee80211_sub_if_data *sdata =
-               (struct ieee80211_sub_if_data *) data;
+               from_timer(sdata, t, u.mgd.bcn_mon_timer);
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
        if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
@@ -4005,10 +4005,10 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
                             &sdata->u.mgd.beacon_connection_loss_work);
 }
 
-static void ieee80211_sta_conn_mon_timer(unsigned long data)
+static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
 {
        struct ieee80211_sub_if_data *sdata =
-               (struct ieee80211_sub_if_data *) data;
+               from_timer(sdata, t, u.mgd.conn_mon_timer);
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_local *local = sdata->local;
 
@@ -4139,14 +4139,10 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
        INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_mgd_work);
        INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work,
                          ieee80211_tdls_peer_del_work);
-       setup_timer(&ifmgd->timer, ieee80211_sta_timer,
-                   (unsigned long) sdata);
-       setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
-                   (unsigned long) sdata);
-       setup_timer(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer,
-                   (unsigned long) sdata);
-       setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
-                   (unsigned long) sdata);
+       timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
+       timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0);
+       timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0);
+       timer_setup(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, 0);
        INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk,
                          ieee80211_sta_handle_tspec_ac_params_wk);
 
index 88e6ebbbe24f5562eb69df3c92ca132d15f5733b..d351dc1162beef273c8db1a6902db6216a97bfb7 100644 (file)
@@ -150,9 +150,10 @@ void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata)
        sdata_unlock(sdata);
 }
 
-static void ieee80211_ocb_housekeeping_timer(unsigned long data)
+static void ieee80211_ocb_housekeeping_timer(struct timer_list *t)
 {
-       struct ieee80211_sub_if_data *sdata = (void *)data;
+       struct ieee80211_sub_if_data *sdata =
+               from_timer(sdata, t, u.ocb.housekeeping_timer);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
 
@@ -165,9 +166,8 @@ void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
 
-       setup_timer(&ifocb->housekeeping_timer,
-                   ieee80211_ocb_housekeeping_timer,
-                   (unsigned long)sdata);
+       timer_setup(&ifocb->housekeeping_timer,
+                   ieee80211_ocb_housekeeping_timer, 0);
        INIT_LIST_HEAD(&ifocb->incomplete_stations);
        spin_lock_init(&ifocb->incomplete_lock);
 }
index a3060e55122c666eb3eedb6c8c93714e0783cab8..0c5627f8a104e17fb54f55c09da597ef84af5be3 100644 (file)
@@ -379,14 +379,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        if (sta_prepare_rate_control(local, sta, gfp))
                goto free_txq;
 
-       for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
-               /*
-                * timer_to_tid must be initialized with identity mapping
-                * to enable session_timer's data differentiation. See
-                * sta_rx_agg_session_timer_expired for usage.
-                */
-               sta->timer_to_tid[i] = i;
-       }
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                skb_queue_head_init(&sta->ps_tx_buf[i]);
                skb_queue_head_init(&sta->tx_filtered[i]);
@@ -1064,9 +1056,9 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
        return ret;
 }
 
-static void sta_info_cleanup(unsigned long data)
+static void sta_info_cleanup(struct timer_list *t)
 {
-       struct ieee80211_local *local = (struct ieee80211_local *) data;
+       struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
        struct sta_info *sta;
        bool timer_needed = false;
 
@@ -1098,8 +1090,7 @@ int sta_info_init(struct ieee80211_local *local)
        mutex_init(&local->sta_mtx);
        INIT_LIST_HEAD(&local->sta_list);
 
-       setup_timer(&local->sta_cleanup, sta_info_cleanup,
-                   (unsigned long)local);
+       timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
        return 0;
 }
 
index 5c54acd10562a66df8aa093b9352d097cadb17b9..cd53619435b641c446ed4e0e69eda1d97714f54a 100644 (file)
@@ -126,6 +126,8 @@ enum ieee80211_agg_stop_reason {
        AGG_STOP_DESTROY_STA,
 };
 
+struct sta_info;
+
 /**
  * struct tid_ampdu_tx - TID aggregation information (Tx).
  *
@@ -133,8 +135,10 @@ enum ieee80211_agg_stop_reason {
  * @session_timer: check if we keep Tx-ing on the TID (by timeout value)
  * @addba_resp_timer: timer for peer's response to addba request
  * @pending: pending frames queue -- use sta's spinlock to protect
+ * @sta: station we are attached to
  * @dialog_token: dialog token for aggregation session
  * @timeout: session timeout value to be filled in ADDBA requests
+ * @tid: TID number
  * @state: session state (see above)
  * @last_tx: jiffies of last tx activity
  * @stop_initiator: initiator of a session stop
@@ -158,6 +162,7 @@ struct tid_ampdu_tx {
        struct timer_list session_timer;
        struct timer_list addba_resp_timer;
        struct sk_buff_head pending;
+       struct sta_info *sta;
        unsigned long state;
        unsigned long last_tx;
        u16 timeout;
@@ -169,6 +174,7 @@ struct tid_ampdu_tx {
        u16 failed_bar_ssn;
        bool bar_pending;
        bool amsdu;
+       u8 tid;
 };
 
 /**
@@ -181,12 +187,14 @@ struct tid_ampdu_tx {
  * @reorder_time: jiffies when skb was added
  * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
  * @reorder_timer: releases expired frames from the reorder buffer.
+ * @sta: station we are attached to
  * @last_rx: jiffies of last rx activity
  * @head_seq_num: head sequence number in reordering buffer.
  * @stored_mpdu_num: number of MPDUs in reordering buffer
  * @ssn: Starting Sequence Number expected to be aggregated.
  * @buf_size: buffer size for incoming A-MPDUs
  * @timeout: reset timer value (in TUs).
+ * @tid: TID number
  * @rcu_head: RCU head used for freeing this struct
  * @reorder_lock: serializes access to reorder buffer, see below.
  * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
@@ -208,6 +216,7 @@ struct tid_ampdu_rx {
        u64 reorder_buf_filtered;
        struct sk_buff_head *reorder_buf;
        unsigned long *reorder_time;
+       struct sta_info *sta;
        struct timer_list session_timer;
        struct timer_list reorder_timer;
        unsigned long last_rx;
@@ -216,6 +225,7 @@ struct tid_ampdu_rx {
        u16 ssn;
        u16 buf_size;
        u16 timeout;
+       u8 tid;
        u8 auto_seq:1,
           removed:1,
           started:1;
@@ -447,7 +457,6 @@ struct ieee80211_sta_rx_stats {
  *     plus one for non-QoS frames)
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
- * @timer_to_tid: identity mapping to ID timers
  * @mesh: mesh STA information
  * @debugfs_dir: debug filesystem directory dentry
  * @dead: set to true when sta is unlinked
@@ -554,7 +563,6 @@ struct sta_info {
         * Aggregation information, locked with lock.
         */
        struct sta_ampdu_mlme ampdu_mlme;
-       u8 timer_to_tid[IEEE80211_NUM_TIDS];
 
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct dentry *debugfs_dir;
index 7b8154474b9e6df129e94d1fc7accc63b7eac7c8..3160954fc406049e17abd1b897d07ba86ed35b02 100644 (file)
@@ -4438,13 +4438,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
 EXPORT_SYMBOL(ieee80211_pspoll_get);
 
 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif)
+                                      struct ieee80211_vif *vif,
+                                      bool qos_ok)
 {
        struct ieee80211_hdr_3addr *nullfunc;
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_if_managed *ifmgd;
        struct ieee80211_local *local;
        struct sk_buff *skb;
+       bool qos = false;
 
        if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
                return NULL;
@@ -4453,7 +4455,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
        ifmgd = &sdata->u.mgd;
        local = sdata->local;
 
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
+       if (qos_ok) {
+               struct sta_info *sta;
+
+               rcu_read_lock();
+               sta = sta_info_get(sdata, ifmgd->bssid);
+               qos = sta && sta->sta.wme;
+               rcu_read_unlock();
+       }
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+                           sizeof(*nullfunc) + 2);
        if (!skb)
                return NULL;
 
@@ -4463,6 +4475,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
        nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                              IEEE80211_STYPE_NULLFUNC |
                                              IEEE80211_FCTL_TODS);
+       if (qos) {
+               __le16 qos = cpu_to_le16(7);
+
+               BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
+                             IEEE80211_STYPE_NULLFUNC) !=
+                            IEEE80211_STYPE_QOS_NULLFUNC);
+               nullfunc->frame_control |=
+                       cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
+               skb->priority = 7;
+               skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+               skb_put_data(skb, &qos, sizeof(qos));
+       }
+
        memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
        memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
        memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
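ieee80211_nullfunc_get() above can now build a QoS NULL frame: with qos_ok set and a WMM peer, the QoS-nullfunc subtype bit is OR-ed into the frame control (the BUILD_BUG_ON checks that the subtype bits nest), two extra bytes are reserved and appended for the QoS control field set to TID 7, and the skb is mapped to the voice queue. A byte-level sketch of just that frame-control/QoS-control handling; the numeric constants follow the usual ieee80211.h values, and the buffer handling is simplified rather than the skb API:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FTYPE_DATA           0x0008
#define STYPE_NULLFUNC       0x0048
#define STYPE_QOS_NULLFUNC   0x00c8
#define FCTL_TODS            0x0100

int main(void)
{
        uint8_t frame[26];                   /* 24-byte header + QoS control */
        uint16_t fc = FTYPE_DATA | STYPE_NULLFUNC | FCTL_TODS;
        int qos = 1;                         /* peer advertises WMM */
        size_t len = 24;

        memset(frame, 0, sizeof(frame));
        if (qos) {
                uint16_t qos_ctl = 7;        /* TID 7, voice access class */

                fc |= STYPE_QOS_NULLFUNC;    /* NULLFUNC bits nest inside */
                frame[len++] = qos_ctl & 0xff;        /* little-endian */
                frame[len++] = qos_ctl >> 8;
        }
        frame[0] = fc & 0xff;                /* frame control, little-endian */
        frame[1] = fc >> 8;

        printf("frame control 0x%04x, length %zu\n", fc, len);
        return 0;
}
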
index a2b904a718c6124d133aa94a59b86448f666b739..c989211bbabc6475d805f6adf1957157b2e6727c 100644 (file)
@@ -184,9 +184,9 @@ report:
        nd->handler(nd);
 }
 
-static void ncsi_channel_monitor(unsigned long data)
+static void ncsi_channel_monitor(struct timer_list *t)
 {
-       struct ncsi_channel *nc = (struct ncsi_channel *)data;
+       struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
        struct ncsi_package *np = nc->package;
        struct ncsi_dev_priv *ndp = np->ndp;
        struct ncsi_channel_mode *ncm;
@@ -313,8 +313,7 @@ struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
        nc->package = np;
        nc->state = NCSI_CHANNEL_INACTIVE;
        nc->monitor.enabled = false;
-       setup_timer(&nc->monitor.timer,
-                   ncsi_channel_monitor, (unsigned long)nc);
+       timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
        spin_lock_init(&nc->lock);
        INIT_LIST_HEAD(&nc->link);
        for (index = 0; index < NCSI_CAP_MAX; index++)
@@ -529,9 +528,9 @@ struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
        return NULL;
 }
 
-static void ncsi_request_timeout(unsigned long data)
+static void ncsi_request_timeout(struct timer_list *t)
 {
-       struct ncsi_request *nr = (struct ncsi_request *)data;
+       struct ncsi_request *nr = from_timer(nr, t, timer);
        struct ncsi_dev_priv *ndp = nr->ndp;
        unsigned long flags;
 
@@ -1577,9 +1576,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
        for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
                ndp->requests[i].id = i;
                ndp->requests[i].ndp = ndp;
-               setup_timer(&ndp->requests[i].timer,
-                           ncsi_request_timeout,
-                           (unsigned long)&ndp->requests[i]);
+               timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
        }
 
        spin_lock_irqsave(&ncsi_dev_lock, flags);
index 64778f9a85481fd69faff2fcd0eaa64031ab06d9..d6748a8a79c5666b11d963a8755df557d8d77e59 100644 (file)
@@ -67,9 +67,9 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
 }
 EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
 
-static void nf_ct_expectation_timed_out(unsigned long ul_expect)
+static void nf_ct_expectation_timed_out(struct timer_list *t)
 {
-       struct nf_conntrack_expect *exp = (void *)ul_expect;
+       struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);
 
        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
@@ -368,8 +368,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        /* two references : one for hash insert, one for the timer */
        refcount_add(2, &exp->use);
 
-       setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
-                   (unsigned long)exp);
+       timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
index 6e0adfefb9ed4fa8256c1ece604c05ef117c1e68..59c08997bfdfdb9c16aa7e9cc1d33f62a46a1769 100644 (file)
@@ -533,6 +533,7 @@ nla_put_failure:
        return -1;
 }
 
+#if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
 static size_t ctnetlink_proto_size(const struct nf_conn *ct)
 {
        const struct nf_conntrack_l3proto *l3proto;
@@ -552,6 +553,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
 
        return len + len4;
 }
+#endif
 
 static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
 {
index cad6498f10b03fca0e873eb8718734b471a65f9c..e5afab86381ca1f1487195009ffde4b8eaeec3e2 100644 (file)
@@ -151,7 +151,7 @@ instance_put(struct nfulnl_instance *inst)
                call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }
 
-static void nfulnl_timer(unsigned long data);
+static void nfulnl_timer(struct timer_list *t);
 
 static struct nfulnl_instance *
 instance_create(struct net *net, u_int16_t group_num,
@@ -184,7 +184,7 @@ instance_create(struct net *net, u_int16_t group_num,
        /* needs to be two, since we _put() after creation */
        refcount_set(&inst->use, 2);
 
-       setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
+       timer_setup(&inst->timer, nfulnl_timer, 0);
 
        inst->net = get_net(net);
        inst->peer_user_ns = user_ns;
@@ -377,9 +377,9 @@ __nfulnl_flush(struct nfulnl_instance *inst)
 }
 
 static void
-nfulnl_timer(unsigned long data)
+nfulnl_timer(struct timer_list *t)
 {
-       struct nfulnl_instance *inst = (struct nfulnl_instance *)data;
+       struct nfulnl_instance *inst = from_timer(inst, t, timer);
 
        spin_lock_bh(&inst->lock);
        if (inst->skb)
index daf45da448fab4406cf4b5727404c88c1f0759be..ee3421ad108da72bd769de43e5ff5540212b11fe 100644 (file)
@@ -107,9 +107,9 @@ static void idletimer_tg_work(struct work_struct *work)
        sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
 }
 
-static void idletimer_tg_expired(unsigned long data)
+static void idletimer_tg_expired(struct timer_list *t)
 {
-       struct idletimer_tg *timer = (struct idletimer_tg *) data;
+       struct idletimer_tg *timer = from_timer(timer, t, timer);
 
        pr_debug("timer %s expired\n", timer->attr.attr.name);
 
@@ -143,8 +143,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
 
        list_add(&info->timer->entry, &idletimer_tg_list);
 
-       setup_timer(&info->timer->timer, idletimer_tg_expired,
-                   (unsigned long) info->timer);
+       timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
        info->timer->refcnt = 1;
 
        mod_timer(&info->timer->timer,
index 3ba31c194ccec3101aa0979bb18a693d30d1c68b..0971634e5444559cb5380a9f9927dc3294db3452 100644 (file)
@@ -85,9 +85,10 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static void led_timeout_callback(unsigned long data)
+static void led_timeout_callback(struct timer_list *t)
 {
-       struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data;
+       struct xt_led_info_internal *ledinternal = from_timer(ledinternal, t,
+                                                             timer);
 
        led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
 }
@@ -143,8 +144,7 @@ static int led_tg_check(const struct xt_tgchk_param *par)
 
        /* See if we need to set up a timer */
        if (ledinfo->delay > 0)
-               setup_timer(&ledinternal->timer, led_timeout_callback,
-                           (unsigned long)ledinternal);
+               timer_setup(&ledinternal->timer, led_timeout_callback, 0);
 
        list_add_tail(&ledinternal->list, &xt_led_triggers);
 
index d0f38bc9af6d8f485f3abe5cf5b923359d314ac1..ac709f0f197b388171f06df03afbecb1920cff83 100644 (file)
@@ -87,7 +87,7 @@ static inline struct netlbl_af4list *__af4list_valid_rcu(struct list_head *s,
        struct list_head *i = s;
        struct netlbl_af4list *n = __af4list_entry(s);
        while (i != h && !n->valid) {
-               i = rcu_dereference(i->next);
+               i = rcu_dereference(list_next_rcu(i));
                n = __af4list_entry(i);
        }
        return n;
@@ -154,7 +154,7 @@ static inline struct netlbl_af6list *__af6list_valid_rcu(struct list_head *s,
        struct list_head *i = s;
        struct netlbl_af6list *n = __af6list_entry(s);
        while (i != h && !n->valid) {
-               i = rcu_dereference(i->next);
+               i = rcu_dereference(list_next_rcu(i));
                n = __af6list_entry(i);
        }
        return n;
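
The two netlabel hunks above swap a bare rcu_dereference(i->next) for rcu_dereference(list_next_rcu(i)): list_head's ->next member carries no __rcu annotation, so list_next_rcu() (from <linux/rculist.h>) is used to obtain a properly annotated pointer for the sparse checker while the traversal itself is unchanged. A minimal sketch of the idiom, assuming the caller already holds rcu_read_lock() and using a hypothetical count_entries_rcu() helper:

/* Illustrative only: walk a list_head chain under RCU protection. */
#include <linux/rculist.h>

static int count_entries_rcu(struct list_head *head)
{
	struct list_head *pos;
	int n = 0;

	/* list_next_rcu() yields the __rcu-annotated ->next pointer,
	 * which rcu_dereference() can then safely load.
	 */
	for (pos = rcu_dereference(list_next_rcu(head));
	     pos != head;
	     pos = rcu_dereference(list_next_rcu(pos)))
		n++;

	return n;	/* caller must hold rcu_read_lock() */
}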
index 2dec3583c97d00df654de19752db89c256f7de6d..7ed9d4422a73decbe395eb603d7d106436d9f3a4 100644 (file)
@@ -284,7 +284,7 @@ void nr_destroy_socket(struct sock *sk)
 
        if (sk_has_allocations(sk)) {
                /* Defer: outstanding buffers */
-               sk->sk_timer.function = (TIMER_FUNC_TYPE)nr_destroy_timer;
+               sk->sk_timer.function = nr_destroy_timer;
                sk->sk_timer.expires  = jiffies + 2 * HZ;
                add_timer(&sk->sk_timer);
        } else
index 989ae647825ef4568c0bd5717c8721e85cde46c6..215ad22a96476ebb9d30919e99d67bda8e1ce88f 100644 (file)
@@ -15,7 +15,7 @@
 #include <net/netrom.h>
 #include <linux/init.h>
 
-static void nr_loopback_timer(unsigned long);
+static void nr_loopback_timer(struct timer_list *);
 
 static struct sk_buff_head loopback_queue;
 static DEFINE_TIMER(loopback_timer, nr_loopback_timer);
@@ -48,7 +48,7 @@ int nr_loopback_queue(struct sk_buff *skb)
        return 1;
 }
 
-static void nr_loopback_timer(unsigned long param)
+static void nr_loopback_timer(struct timer_list *unused)
 {
        struct sk_buff *skb;
        ax25_address *nr_dest;
index 43569aea0f5e2d9bcd941247e2df3be0d79913a8..cbd51ed5a2d7bef7540ea8a8a2e0fea9ff8fc02f 100644 (file)
@@ -45,7 +45,7 @@ void nr_init_timers(struct sock *sk)
        timer_setup(&nr->idletimer, nr_idletimer_expiry, 0);
 
        /* initialized by sock_init_data */
-       sk->sk_timer.function = (TIMER_FUNC_TYPE)nr_heartbeat_expiry;
+       sk->sk_timer.function = nr_heartbeat_expiry;
 }
 
 void nr_start_t1timer(struct sock *sk)
index c25e9b4179c34b571c6adbb65f3cff9f60dad06d..074960154993fef5061cece1009b014c10668737 100644 (file)
@@ -591,18 +591,18 @@ static int nci_close_device(struct nci_dev *ndev)
 }
 
 /* NCI command timer function */
-static void nci_cmd_timer(unsigned long arg)
+static void nci_cmd_timer(struct timer_list *t)
 {
-       struct nci_dev *ndev = (void *) arg;
+       struct nci_dev *ndev = from_timer(ndev, t, cmd_timer);
 
        atomic_set(&ndev->cmd_cnt, 1);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);
 }
 
 /* NCI data exchange timer function */
-static void nci_data_timer(unsigned long arg)
+static void nci_data_timer(struct timer_list *t)
 {
-       struct nci_dev *ndev = (void *) arg;
+       struct nci_dev *ndev = from_timer(ndev, t, data_timer);
 
        set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
        queue_work(ndev->rx_wq, &ndev->rx_work);
@@ -1232,10 +1232,8 @@ int nci_register_device(struct nci_dev *ndev)
        skb_queue_head_init(&ndev->rx_q);
        skb_queue_head_init(&ndev->tx_q);
 
-       setup_timer(&ndev->cmd_timer, nci_cmd_timer,
-                   (unsigned long) ndev);
-       setup_timer(&ndev->data_timer, nci_data_timer,
-                   (unsigned long) ndev);
+       timer_setup(&ndev->cmd_timer, nci_cmd_timer, 0);
+       timer_setup(&ndev->data_timer, nci_data_timer, 0);
 
        mutex_init(&ndev->req_lock);
        INIT_LIST_HEAD(&ndev->conn_info_list);
index f6359c277212518d7725b0b91978500b244f4da8..c0b83dc9d99305850a61b647fe362b6ed52c50aa 100644 (file)
@@ -75,7 +75,7 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
        if (!hdr)
                return -EMSGSIZE;
 
-       genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
+       genl_dump_check_consistent(cb, hdr);
 
        if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) ||
            nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) ||
@@ -603,7 +603,7 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
                return -EMSGSIZE;
 
        if (cb)
-               genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
+               genl_dump_check_consistent(cb, hdr);
 
        if (nfc_genl_setup_device_added(dev, msg))
                goto nla_put_failure;
@@ -1356,7 +1356,7 @@ static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev,
                        goto nla_put_failure;
 
                if (cb)
-                       genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
+                       genl_dump_check_consistent(cb, hdr);
 
                if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
                    nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) ||
index 0dab33fb9844cd0b2207c2d816780391fb08baa3..ef38e5aecd2851d61d204e4e04c9d6d723f14887 100644 (file)
@@ -308,6 +308,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info,
                                 uint32_t cutlen)
 {
+       unsigned int gso_type = skb_shinfo(skb)->gso_type;
+       struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;
 
@@ -318,9 +320,21 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
        if (segs == NULL)
                return -EINVAL;
 
+       if (gso_type & SKB_GSO_UDP) {
+               /* The initial flow key extracted by ovs_flow_key_extract()
+                * in this case is for a first fragment, so we need to
+                * properly mark later fragments.
+                */
+               later_key = *key;
+               later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+       }
+
        /* Queue all of the segments. */
        skb = segs;
        do {
+               if (gso_type & SKB_GSO_UDP && skb != segs)
+                       key = &later_key;
+
                err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
                if (err)
                        break;
index 864ddb1e3642bd26d8ca8a9153c9f16d5d766680..dbe2379329c5517fb164b6024d40fabebe7855c8 100644 (file)
@@ -631,7 +631,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                        return 0;
                }
-               if (nh->frag_off & htons(IP_MF))
+               if (nh->frag_off & htons(IP_MF) ||
+                       skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;
                else
                        key->ip.frag = OVS_FRAG_TYPE_NONE;
@@ -747,6 +748,9 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 
                if (key->ip.frag == OVS_FRAG_TYPE_LATER)
                        return 0;
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
                /* Transport layer. */
                if (key->ip.proto == NEXTHDR_TCP) {
                        if (tcphdr_ok(skb)) {
index dc424798ba6f3afcb3095e710ffaf5dfb42b85fe..624ea74353dd3ba403ccedc822f1c9574982e709 100644 (file)
@@ -2241,14 +2241,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
 
 #define MAX_ACTIONS_BUFSIZE    (32 * 1024)
 
-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size)
 {
        struct sw_flow_actions *sfa;
 
-       if (size > MAX_ACTIONS_BUFSIZE) {
-               OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
-               return ERR_PTR(-EINVAL);
-       }
+       WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
 
        sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
        if (!sfa)
@@ -2321,12 +2318,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
        new_acts_size = ksize(*sfa) * 2;
 
        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+                       OVS_NLERR(log, "Flow action size exceeds max %u",
+                                 MAX_ACTIONS_BUFSIZE);
                        return ERR_PTR(-EMSGSIZE);
+               }
                new_acts_size = MAX_ACTIONS_BUFSIZE;
        }
 
-       acts = nla_alloc_flow_actions(new_acts_size, log);
+       acts = nla_alloc_flow_actions(new_acts_size);
        if (IS_ERR(acts))
                return (void *)acts;
 
@@ -3059,7 +3059,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 {
        int err;
 
-       *sfa = nla_alloc_flow_actions(nla_len(attr), log);
+       *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
        if (IS_ERR(*sfa))
                return PTR_ERR(*sfa);
 
index 737092ca9b4eed464b6c0907d85b679ae4da6046..da215e5c139928132497c952aea91c61929e70b6 100644 (file)
@@ -1687,7 +1687,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                atomic_long_set(&rollover->num, 0);
                atomic_long_set(&rollover->num_huge, 0);
                atomic_long_set(&rollover->num_failed, 0);
-               po->rollover = rollover;
        }
 
        if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
@@ -1745,6 +1744,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
                        __dev_remove_pack(&po->prot_hook);
                        po->fanout = match;
+                       po->rollover = rollover;
+                       rollover = NULL;
                        refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
                        __fanout_link(sk, po);
                        err = 0;
@@ -1758,10 +1759,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
        }
 
 out:
-       if (err && rollover) {
-               kfree_rcu(rollover, rcu);
-               po->rollover = NULL;
-       }
+       kfree(rollover);
        mutex_unlock(&fanout_mutex);
        return err;
 }
@@ -1785,11 +1783,6 @@ static struct packet_fanout *fanout_release(struct sock *sk)
                        list_del(&f->list);
                else
                        f = NULL;
-
-               if (po->rollover) {
-                       kfree_rcu(po->rollover, rcu);
-                       po->rollover = NULL;
-               }
        }
        mutex_unlock(&fanout_mutex);
 
@@ -3029,6 +3022,7 @@ static int packet_release(struct socket *sock)
        synchronize_net();
 
        if (f) {
+               kfree(po->rollover);
                fanout_release_data(f);
                kfree(f);
        }
@@ -3097,6 +3091,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
        if (need_rehook) {
                if (po->running) {
                        rcu_read_unlock();
+                       /* prevents packet_notifier() from calling
+                        * register_prot_hook()
+                        */
+                       po->num = 0;
                        __unregister_prot_hook(sk, true);
                        rcu_read_lock();
                        dev_curr = po->prot_hook.dev;
@@ -3105,6 +3103,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
                                                                 dev->ifindex);
                }
 
+               BUG_ON(po->running);
                po->num = proto;
                po->prot_hook.type = proto;
 
@@ -3843,7 +3842,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        void *data = &val;
        union tpacket_stats_u st;
        struct tpacket_rollover_stats rstats;
-       struct packet_rollover *rollover;
 
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
@@ -3922,18 +3920,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                       0);
                break;
        case PACKET_ROLLOVER_STATS:
-               rcu_read_lock();
-               rollover = rcu_dereference(po->rollover);
-               if (rollover) {
-                       rstats.tp_all = atomic_long_read(&rollover->num);
-                       rstats.tp_huge = atomic_long_read(&rollover->num_huge);
-                       rstats.tp_failed = atomic_long_read(&rollover->num_failed);
-                       data = &rstats;
-                       lv = sizeof(rstats);
-               }
-               rcu_read_unlock();
-               if (!rollover)
+               if (!po->rollover)
                        return -EINVAL;
+               rstats.tp_all = atomic_long_read(&po->rollover->num);
+               rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+               rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+               data = &rstats;
+               lv = sizeof(rstats);
                break;
        case PACKET_TX_HAS_OFF:
                val = po->tp_tx_has_off;
index 562fbc155006374862e5bfdd78b65a7f46210bea..a1d2b2319ae990d55bb3c469cc1ba404cadcbdb9 100644 (file)
@@ -95,7 +95,6 @@ struct packet_fanout {
 
 struct packet_rollover {
        int                     sock;
-       struct rcu_head         rcu;
        atomic_long_t           num;
        atomic_long_t           num_huge;
        atomic_long_t           num_failed;
index cda4c6678ef16708c185b220f24647ee308f9a93..62055d3069d2a6849d6bc37df8f1d2b98ab420ac 100644 (file)
@@ -37,7 +37,7 @@ void rose_start_ftimer(struct rose_neigh *neigh)
 {
        del_timer(&neigh->ftimer);
 
-       neigh->ftimer.function = (TIMER_FUNC_TYPE)rose_ftimer_expiry;
+       neigh->ftimer.function = rose_ftimer_expiry;
        neigh->ftimer.expires  =
                jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
 
@@ -48,7 +48,7 @@ static void rose_start_t0timer(struct rose_neigh *neigh)
 {
        del_timer(&neigh->t0timer);
 
-       neigh->t0timer.function = (TIMER_FUNC_TYPE)rose_t0timer_expiry;
+       neigh->t0timer.function = rose_t0timer_expiry;
        neigh->t0timer.expires  =
                jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
 
index ea613b2a97358a9c60a21b7744e7d564f1eb7d4c..74555fb9561547a2e01e188c38304b629c3bf8ec 100644 (file)
@@ -36,7 +36,7 @@ void rose_start_heartbeat(struct sock *sk)
 {
        del_timer(&sk->sk_timer);
 
-       sk->sk_timer.function = (TIMER_FUNC_TYPE)rose_heartbeat_expiry;
+       sk->sk_timer.function = rose_heartbeat_expiry;
        sk->sk_timer.expires  = jiffies + 5 * HZ;
 
        add_timer(&sk->sk_timer);
@@ -48,7 +48,7 @@ void rose_start_t1timer(struct sock *sk)
 
        del_timer(&rose->timer);
 
-       rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+       rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t1;
 
        add_timer(&rose->timer);
@@ -60,7 +60,7 @@ void rose_start_t2timer(struct sock *sk)
 
        del_timer(&rose->timer);
 
-       rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+       rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t2;
 
        add_timer(&rose->timer);
@@ -72,7 +72,7 @@ void rose_start_t3timer(struct sock *sk)
 
        del_timer(&rose->timer);
 
-       rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+       rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->t3;
 
        add_timer(&rose->timer);
@@ -84,7 +84,7 @@ void rose_start_hbtimer(struct sock *sk)
 
        del_timer(&rose->timer);
 
-       rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+       rose->timer.function = rose_timer_expiry;
        rose->timer.expires  = jiffies + rose->hb;
 
        add_timer(&rose->timer);
@@ -97,7 +97,7 @@ void rose_start_idletimer(struct sock *sk)
        del_timer(&rose->idletimer);
 
        if (rose->idle > 0) {
-               rose->idletimer.function = (TIMER_FUNC_TYPE)rose_idletimer_expiry;
+               rose->idletimer.function = rose_idletimer_expiry;
                rose->idletimer.expires  = jiffies + rose->idle;
 
                add_timer(&rose->idletimer);
index 9b5c46b052fd07cb9ee82cea6fef0be3e32ffb4e..8f7cf4c042be2b9b4379968655bea594a2928546 100644 (file)
@@ -285,6 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           bool upgrade)
 {
        struct rxrpc_conn_parameters cp;
+       struct rxrpc_call_params p;
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        int ret;
@@ -302,6 +303,10 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        if (key && !key->payload.data[0])
                key = NULL; /* a no-security key */
 
+       memset(&p, 0, sizeof(p));
+       p.user_call_ID = user_call_ID;
+       p.tx_total_len = tx_total_len;
+
        memset(&cp, 0, sizeof(cp));
        cp.local                = rx->local;
        cp.key                  = key;
@@ -309,8 +314,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        cp.exclusive            = false;
        cp.upgrade              = upgrade;
        cp.service_id           = srx->srx_service;
-       call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
-                                    gfp);
+       call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
        /* The socket has been unlocked. */
        if (!IS_ERR(call)) {
                call->notify_rx = notify_rx;
@@ -863,6 +867,19 @@ static int rxrpc_release_sock(struct sock *sk)
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
 
+       /* We want to kill off all connections from a service socket
+        * as fast as possible because we can't share these; client
+        * sockets, on the other hand, can share an endpoint.
+        */
+       switch (sk->sk_state) {
+       case RXRPC_SERVER_BOUND:
+       case RXRPC_SERVER_BOUND2:
+       case RXRPC_SERVER_LISTENING:
+       case RXRPC_SERVER_LISTEN_DISABLED:
+               rx->local->service_closed = true;
+               break;
+       }
+
        spin_lock_bh(&sk->sk_receive_queue.lock);
        sk->sk_state = RXRPC_CLOSE;
        spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -878,6 +895,8 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);
+       rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
+       rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
 
        rxrpc_put_local(rx->local);
        rx->local = NULL;
index b2151993d384bd4b0602256e755e3ca06295e10d..416688381eb7d4d1a6fc71d826f0f2e35eda3b31 100644 (file)
@@ -79,17 +79,20 @@ struct rxrpc_net {
        struct list_head        conn_proc_list; /* List of conns in this namespace for proc */
        struct list_head        service_conns;  /* Service conns in this namespace */
        rwlock_t                conn_lock;      /* Lock for ->conn_proc_list, ->service_conns */
-       struct delayed_work     service_conn_reaper;
+       struct work_struct      service_conn_reaper;
+       struct timer_list       service_conn_reap_timer;
 
        unsigned int            nr_client_conns;
        unsigned int            nr_active_client_conns;
        bool                    kill_all_client_conns;
+       bool                    live;
        spinlock_t              client_conn_cache_lock; /* Lock for ->*_client_conns */
        spinlock_t              client_conn_discard_lock; /* Prevent multiple discarders */
        struct list_head        waiting_client_conns;
        struct list_head        active_client_conns;
        struct list_head        idle_client_conns;
-       struct delayed_work     client_conn_reaper;
+       struct work_struct      client_conn_reaper;
+       struct timer_list       client_conn_reap_timer;
 
        struct list_head        local_endpoints;
        struct mutex            local_mutex;    /* Lock for ->local_endpoints */
@@ -265,6 +268,7 @@ struct rxrpc_local {
        rwlock_t                services_lock;  /* lock for services list */
        int                     debug_id;       /* debug ID for printks */
        bool                    dead;
+       bool                    service_closed; /* Service socket closed */
        struct sockaddr_rxrpc   srx;            /* local address */
 };
 
@@ -338,8 +342,17 @@ enum rxrpc_conn_flag {
        RXRPC_CONN_DONT_REUSE,          /* Don't reuse this connection */
        RXRPC_CONN_COUNTED,             /* Counted by rxrpc_nr_client_conns */
        RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
+       RXRPC_CONN_FINAL_ACK_0,         /* Need final ACK for channel 0 */
+       RXRPC_CONN_FINAL_ACK_1,         /* Need final ACK for channel 1 */
+       RXRPC_CONN_FINAL_ACK_2,         /* Need final ACK for channel 2 */
+       RXRPC_CONN_FINAL_ACK_3,         /* Need final ACK for channel 3 */
 };
 
+#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |   \
+                                  (1UL << RXRPC_CONN_FINAL_ACK_1) |    \
+                                  (1UL << RXRPC_CONN_FINAL_ACK_2) |    \
+                                  (1UL << RXRPC_CONN_FINAL_ACK_3))
+
 /*
  * Events that can be raised upon a connection.
  */
@@ -393,6 +406,7 @@ struct rxrpc_connection {
 #define RXRPC_ACTIVE_CHANS_MASK        ((1 << RXRPC_MAXCALLS) - 1)
        struct list_head        waiting_calls;  /* Calls waiting for channels */
        struct rxrpc_channel {
+               unsigned long           final_ack_at;   /* Time at which to issue final ACK */
                struct rxrpc_call __rcu *call;          /* Active call */
                u32                     call_id;        /* ID of current call */
                u32                     call_counter;   /* Call ID counter */
@@ -404,6 +418,7 @@ struct rxrpc_connection {
                };
        } channels[RXRPC_MAXCALLS];
 
+       struct timer_list       timer;          /* Conn event timer */
        struct work_struct      processor;      /* connection event processor */
        union {
                struct rb_node  client_node;    /* Node in local->client_conns */
@@ -457,9 +472,10 @@ enum rxrpc_call_flag {
 enum rxrpc_call_event {
        RXRPC_CALL_EV_ACK,              /* need to generate ACK */
        RXRPC_CALL_EV_ABORT,            /* need to generate abort */
-       RXRPC_CALL_EV_TIMER,            /* Timer expired */
        RXRPC_CALL_EV_RESEND,           /* Tx resend required */
        RXRPC_CALL_EV_PING,             /* Ping send required */
+       RXRPC_CALL_EV_EXPIRED,          /* Expiry occurred */
+       RXRPC_CALL_EV_ACK_LOST,         /* ACK may be lost, send ping */
 };
 
 /*
@@ -503,10 +519,16 @@ struct rxrpc_call {
        struct rxrpc_peer       *peer;          /* Peer record for remote address */
        struct rxrpc_sock __rcu *socket;        /* socket responsible */
        struct mutex            user_mutex;     /* User access mutex */
-       ktime_t                 ack_at;         /* When deferred ACK needs to happen */
-       ktime_t                 resend_at;      /* When next resend needs to happen */
-       ktime_t                 ping_at;        /* When next to send a ping */
-       ktime_t                 expire_at;      /* When the call times out */
+       unsigned long           ack_at;         /* When deferred ACK needs to happen */
+       unsigned long           ack_lost_at;    /* When ACK is figured as lost */
+       unsigned long           resend_at;      /* When next resend needs to happen */
+       unsigned long           ping_at;        /* When next to send a ping */
+       unsigned long           keepalive_at;   /* When next to send a keepalive ping */
+       unsigned long           expect_rx_by;   /* When we expect to get a packet by */
+       unsigned long           expect_req_by;  /* When we expect to get a request DATA packet by */
+       unsigned long           expect_term_by; /* When we expect call termination by */
+       u32                     next_rx_timo;   /* Timeout for next Rx packet (jif) */
+       u32                     next_req_timo;  /* Timeout for next Rx request packet (jif) */
        struct timer_list       timer;          /* Combined event timer */
        struct work_struct      processor;      /* Event processor */
        rxrpc_notify_rx_t       notify_rx;      /* kernel service Rx notification function */
@@ -609,6 +631,8 @@ struct rxrpc_call {
        ktime_t                 acks_latest_ts; /* Timestamp of latest ACK received */
        rxrpc_serial_t          acks_latest;    /* serial number of latest ACK received */
        rxrpc_seq_t             acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
+       rxrpc_seq_t             acks_lost_top;  /* tx_top at the time lost-ack ping sent */
+       rxrpc_serial_t          acks_lost_ping; /* Serial number of probe ACK */
 };
 
 /*
@@ -632,6 +656,35 @@ struct rxrpc_ack_summary {
        u8                      cumulative_acks;
 };
 
+/*
+ * sendmsg() cmsg-specified parameters.
+ */
+enum rxrpc_command {
+       RXRPC_CMD_SEND_DATA,            /* send data message */
+       RXRPC_CMD_SEND_ABORT,           /* request abort generation */
+       RXRPC_CMD_ACCEPT,               /* [server] accept incoming call */
+       RXRPC_CMD_REJECT_BUSY,          /* [server] reject a call as busy */
+};
+
+struct rxrpc_call_params {
+       s64                     tx_total_len;   /* Total Tx data length (if send data) */
+       unsigned long           user_call_ID;   /* User's call ID */
+       struct {
+               u32             hard;           /* Maximum lifetime (sec) */
+               u32             idle;           /* Max time since last data packet (msec) */
+               u32             normal;         /* Max time since last call packet (msec) */
+       } timeouts;
+       u8                      nr_timeouts;    /* Number of timeouts specified */
+};
+
+struct rxrpc_send_params {
+       struct rxrpc_call_params call;
+       u32                     abort_code;     /* Abort code to Tx (if abort) */
+       enum rxrpc_command      command : 8;    /* The command to implement */
+       bool                    exclusive;      /* Shared or exclusive call */
+       bool                    upgrade;        /* If the connection is upgradeable */
+};
+
 #include <trace/events/rxrpc.h>
 
 /*
@@ -657,12 +710,19 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
                       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);
 
+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+                                          unsigned long expire_at,
+                                          unsigned long now,
+                                          enum rxrpc_timer_trace why)
+{
+       trace_rxrpc_timer(call, why, now);
+       timer_reduce(&call->timer, expire_at);
+}
+
 /*
  * call_object.c
  */
@@ -672,11 +732,11 @@ extern unsigned int rxrpc_max_call_lifetime;
 extern struct kmem_cache *rxrpc_call_jar;
 
 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
                                         struct rxrpc_conn_parameters *,
                                         struct sockaddr_rxrpc *,
-                                        unsigned long, s64, gfp_t);
+                                        struct rxrpc_call_params *, gfp_t);
 int rxrpc_retry_client_call(struct rxrpc_sock *,
                            struct rxrpc_call *,
                            struct rxrpc_conn_parameters *,
@@ -803,8 +863,8 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
  */
 extern unsigned int rxrpc_max_client_connections;
 extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
 extern struct idr rxrpc_client_conn_ids;
 
 void rxrpc_destroy_client_conn_ids(void);
@@ -825,6 +885,7 @@ void rxrpc_process_connection(struct work_struct *);
  * conn_object.c
  */
 extern unsigned int rxrpc_connection_expiry;
+extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
@@ -861,6 +922,12 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
                rxrpc_put_service_conn(conn);
 }
 
+static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+                                          unsigned long expire_at)
+{
+       timer_reduce(&conn->timer, expire_at);
+}
+
 /*
  * conn_service.c
  */
@@ -930,13 +997,13 @@ static inline void rxrpc_queue_local(struct rxrpc_local *local)
  * misc.c
  */
 extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
 extern unsigned int rxrpc_rx_window_size;
 extern unsigned int rxrpc_rx_mtu;
 extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;
 
 extern const s8 rxrpc_ack_priority[];
 
@@ -954,7 +1021,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
 /*
  * output.c
  */
-int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
+int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
 int rxrpc_send_abort_packet(struct rxrpc_call *);
 int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
 void rxrpc_reject_packets(struct rxrpc_local *);
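
The ar-internal.h changes above replace the two delayed_work reapers with plain work_struct plus timer_list pairs and add rxrpc_reduce_call_timer()/rxrpc_reduce_conn_timer() on top of timer_reduce(). timer_reduce() behaves like mod_timer() except that it only rearms the timer when the requested expiry is earlier than the one already pending, so several independent deadlines can share one timer that always fires at the soonest of them. A minimal sketch of that behaviour with a hypothetical multi_deadline structure (not rxrpc's actual types):

/* Illustrative only: one timer tracking the earliest of two deadlines. */
#include <linux/timer.h>

struct multi_deadline {
	struct timer_list timer;
	unsigned long ack_at;
	unsigned long resend_at;
};

static void multi_deadline_update(struct multi_deadline *md,
				  unsigned long ack_at,
				  unsigned long resend_at)
{
	WRITE_ONCE(md->ack_at, ack_at);
	WRITE_ONCE(md->resend_at, resend_at);

	/* Each call can only pull the expiry earlier; the timer callback
	 * is responsible for working out which deadline actually tripped.
	 */
	timer_reduce(&md->timer, ack_at);
	timer_reduce(&md->timer, resend_at);
}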
index cbd1701e813a76864da2f4cc294476cce008d878..3028298ca56134e86b1ef60c9987b37490e12f19 100644 (file)
@@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
        /* Now it gets complicated, because calls get registered with the
         * socket here, particularly if a user ID is preassigned by the user.
         */
-       call = rxrpc_alloc_call(gfp);
+       call = rxrpc_alloc_call(rx, gfp);
        if (!call)
                return -ENOMEM;
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
index 3574508baf9aa6d65dcf368e1b6de041a60ee933..bda952ffe6a6eab394e39220a6fe6a6af19c8e08 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-/*
- * Set the timer
- */
-void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-                      ktime_t now)
-{
-       unsigned long t_j, now_j = jiffies;
-       ktime_t t;
-       bool queue = false;
-
-       if (call->state < RXRPC_CALL_COMPLETE) {
-               t = call->expire_at;
-               if (!ktime_after(t, now)) {
-                       trace_rxrpc_timer(call, why, now, now_j);
-                       queue = true;
-                       goto out;
-               }
-
-               if (!ktime_after(call->resend_at, now)) {
-                       call->resend_at = call->expire_at;
-                       if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
-                               queue = true;
-               } else if (ktime_before(call->resend_at, t)) {
-                       t = call->resend_at;
-               }
-
-               if (!ktime_after(call->ack_at, now)) {
-                       call->ack_at = call->expire_at;
-                       if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
-                               queue = true;
-               } else if (ktime_before(call->ack_at, t)) {
-                       t = call->ack_at;
-               }
-
-               if (!ktime_after(call->ping_at, now)) {
-                       call->ping_at = call->expire_at;
-                       if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
-                               queue = true;
-               } else if (ktime_before(call->ping_at, t)) {
-                       t = call->ping_at;
-               }
-
-               t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
-               t_j += jiffies;
-
-               /* We have to make sure that the calculated jiffies value falls
-                * at or after the nsec value, or we may loop ceaselessly
-                * because the timer times out, but we haven't reached the nsec
-                * timeout yet.
-                */
-               t_j++;
-
-               if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
-                       mod_timer(&call->timer, t_j);
-                       trace_rxrpc_timer(call, why, now, now_j);
-               }
-       }
-
-out:
-       if (queue)
-               rxrpc_queue_call(call);
-}
-
-/*
- * Set the timer
- */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-                    ktime_t now)
-{
-       read_lock_bh(&call->state_lock);
-       __rxrpc_set_timer(call, why, now);
-       read_unlock_bh(&call->state_lock);
-}
-
 /*
  * Propose a PING ACK be sent.
  */
@@ -106,12 +32,13 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
                    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
                        rxrpc_queue_call(call);
        } else {
-               ktime_t now = ktime_get_real();
-               ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+               unsigned long now = jiffies;
+               unsigned long ping_at = now + rxrpc_idle_ack_delay;
 
-               if (ktime_before(ping_at, call->ping_at)) {
-                       call->ping_at = ping_at;
-                       rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+               if (time_before(ping_at, call->ping_at)) {
+                       WRITE_ONCE(call->ping_at, ping_at);
+                       rxrpc_reduce_call_timer(call, ping_at, now,
+                                               rxrpc_timer_set_for_ping);
                }
        }
 }
@@ -125,8 +52,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                                enum rxrpc_propose_ack_trace why)
 {
        enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
-       unsigned int expiry = rxrpc_soft_ack_delay;
-       ktime_t now, ack_at;
+       unsigned long expiry = rxrpc_soft_ack_delay;
        s8 prior = rxrpc_ack_priority[ack_reason];
 
        /* Pings are handled specially because we don't want to accidentally
@@ -190,11 +116,18 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                    background)
                        rxrpc_queue_call(call);
        } else {
-               now = ktime_get_real();
-               ack_at = ktime_add_ms(now, expiry);
-               if (ktime_before(ack_at, call->ack_at)) {
-                       call->ack_at = ack_at;
-                       rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
+               unsigned long now = jiffies, ack_at;
+
+               if (call->peer->rtt_usage > 0)
+                       ack_at = nsecs_to_jiffies(call->peer->rtt);
+               else
+                       ack_at = expiry;
+
+               ack_at = jiffies + expiry;
+               if (time_before(ack_at, call->ack_at)) {
+                       WRITE_ONCE(call->ack_at, ack_at);
+                       rxrpc_reduce_call_timer(call, ack_at, now,
+                                               rxrpc_timer_set_for_ack);
                }
        }
 
@@ -227,18 +160,28 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 /*
  * Perform retransmission of NAK'd and unack'd packets.
  */
-static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
+static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 {
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
+       unsigned long resend_at;
        rxrpc_seq_t cursor, seq, top;
-       ktime_t max_age, oldest, ack_ts;
+       ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
        int ix;
        u8 annotation, anno_type, retrans = 0, unacked = 0;
 
        _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
 
-       max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
+       if (call->peer->rtt_usage > 1)
+               timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
+       else
+               timeout = ms_to_ktime(rxrpc_resend_timeout);
+       min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
+       if (ktime_before(timeout, min_timeo))
+               timeout = min_timeo;
+
+       now = ktime_get_real();
+       max_age = ktime_sub(now, timeout);
 
        spin_lock_bh(&call->lock);
 
@@ -282,7 +225,9 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
                                       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
        }
 
-       call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+       resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
+       resend_at += jiffies + rxrpc_resend_timeout;
+       WRITE_ONCE(call->resend_at, resend_at);
 
        if (unacked)
                rxrpc_congestion_timeout(call);
@@ -292,14 +237,15 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
         * retransmitting data.
         */
        if (!retrans) {
-               rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+               rxrpc_reduce_call_timer(call, resend_at, now,
+                                       rxrpc_timer_set_for_resend);
                spin_unlock_bh(&call->lock);
                ack_ts = ktime_sub(now, call->acks_latest_ts);
                if (ktime_to_ns(ack_ts) < call->peer->rtt)
                        goto out;
                rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
                                  rxrpc_propose_ack_ping_for_lost_ack);
-               rxrpc_send_ack_packet(call, true);
+               rxrpc_send_ack_packet(call, true, NULL);
                goto out;
        }
 
@@ -364,7 +310,8 @@ void rxrpc_process_call(struct work_struct *work)
 {
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, processor);
-       ktime_t now;
+       rxrpc_serial_t *send_ack;
+       unsigned long now, next, t;
 
        rxrpc_see_call(call);
 
@@ -384,22 +331,89 @@ recheck_state:
                goto out_put;
        }
 
-       now = ktime_get_real();
-       if (ktime_before(call->expire_at, now)) {
+       /* Work out if any timeouts tripped */
+       now = jiffies;
+       t = READ_ONCE(call->expect_rx_by);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
+               set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+       }
+
+       t = READ_ONCE(call->expect_req_by);
+       if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+           time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
+               set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+       }
+
+       t = READ_ONCE(call->expect_term_by);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
+               set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+       }
+
+       t = READ_ONCE(call->ack_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+               cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
+               set_bit(RXRPC_CALL_EV_ACK, &call->events);
+       }
+
+       t = READ_ONCE(call->ack_lost_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
+               cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+               set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
+       }
+
+       t = READ_ONCE(call->keepalive_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
+               cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+               rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+                                 rxrpc_propose_ack_ping_for_keepalive);
+               set_bit(RXRPC_CALL_EV_PING, &call->events);
+       }
+
+       t = READ_ONCE(call->ping_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
+               cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+               set_bit(RXRPC_CALL_EV_PING, &call->events);
+       }
+
+       t = READ_ONCE(call->resend_at);
+       if (time_after_eq(now, t)) {
+               trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
+               cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+               set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+       }
+
+       /* Process events */
+       if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
                rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                goto recheck_state;
        }
 
-       if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+       send_ack = NULL;
+       if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
+               call->acks_lost_top = call->tx_top;
+               rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+                                 rxrpc_propose_ack_ping_for_lost_ack);
+               send_ack = &call->acks_lost_ping;
+       }
+
+       if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
+           send_ack) {
                if (call->ackr_reason) {
-                       rxrpc_send_ack_packet(call, false);
+                       rxrpc_send_ack_packet(call, false, send_ack);
                        goto recheck_state;
                }
        }
 
        if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
-               rxrpc_send_ack_packet(call, true);
+               rxrpc_send_ack_packet(call, true, NULL);
                goto recheck_state;
        }
 
@@ -408,7 +422,24 @@ recheck_state:
                goto recheck_state;
        }
 
-       rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+       /* Make sure the timer is restarted */
+       next = call->expect_rx_by;
+
+#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+       
+       set(call->expect_req_by);
+       set(call->expect_term_by);
+       set(call->ack_at);
+       set(call->ack_lost_at);
+       set(call->resend_at);
+       set(call->keepalive_at);
+       set(call->ping_at);
+
+       now = jiffies;
+       if (time_after_eq(now, next))
+               goto recheck_state;
+
+       rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
 
        /* other events may have been raised since we started checking */
        if (call->events && call->state < RXRPC_CALL_COMPLETE) {
index 4c7fbc6dcce73167cf602bf0f2f8078522f8f443..0b2db38dd32d4c2418827236faf5219bc70cc97f 100644 (file)
@@ -45,16 +45,20 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
 
 struct kmem_cache *rxrpc_call_jar;
 
-static void rxrpc_call_timer_expired(unsigned long _call)
+static void rxrpc_call_timer_expired(struct timer_list *t)
 {
-       struct rxrpc_call *call = (struct rxrpc_call *)_call;
+       struct rxrpc_call *call = from_timer(call, t, timer);
 
        _enter("%d", call->debug_id);
 
-       if (call->state < RXRPC_CALL_COMPLETE)
-               rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+       if (call->state < RXRPC_CALL_COMPLETE) {
+               trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+               rxrpc_queue_call(call);
+       }
 }
 
+static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
+
 /*
  * find an extant server call
  * - called in process context with IRQs enabled
@@ -95,7 +99,7 @@ found_extant_call:
 /*
  * allocate a new call
  */
-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
 {
        struct rxrpc_call *call;
 
@@ -114,8 +118,15 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
                goto nomem_2;
 
        mutex_init(&call->user_mutex);
-       setup_timer(&call->timer, rxrpc_call_timer_expired,
-                   (unsigned long)call);
+
+       /* Prevent lockdep reporting a deadlock false positive between the afs
+        * filesystem and sys_sendmsg() via the mmap sem.
+        */
+       if (rx->sk.sk_kern_sock)
+               lockdep_set_class(&call->user_mutex,
+                                 &rxrpc_call_user_mutex_lock_class_key);
+
+       timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
@@ -129,6 +140,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);
        call->tx_total_len = -1;
+       call->next_rx_timo = 20 * HZ;
+       call->next_req_timo = 1 * HZ;
 
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));
 
@@ -151,7 +164,8 @@ nomem:
 /*
  * Allocate a new client call.
  */
-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+                                                 struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp)
 {
        struct rxrpc_call *call;
@@ -159,7 +173,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
 
        _enter("");
 
-       call = rxrpc_alloc_call(gfp);
+       call = rxrpc_alloc_call(rx, gfp);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -178,15 +192,17 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
  */
 static void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
-       ktime_t now = ktime_get_real(), expire_at;
-
-       expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
-       call->expire_at = expire_at;
-       call->ack_at = expire_at;
-       call->ping_at = expire_at;
-       call->resend_at = expire_at;
-       call->timer.expires = jiffies + LONG_MAX / 2;
-       rxrpc_set_timer(call, rxrpc_timer_begin, now);
+       unsigned long now = jiffies;
+       unsigned long j = now + MAX_JIFFY_OFFSET;
+
+       call->ack_at = j;
+       call->ack_lost_at = j;
+       call->resend_at = j;
+       call->ping_at = j;
+       call->expect_rx_by = j;
+       call->expect_req_by = j;
+       call->expect_term_by = j;
+       call->timer.expires = now;
 }
 
 /*
@@ -197,8 +213,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
-                                        unsigned long user_call_ID,
-                                        s64 tx_total_len,
+                                        struct rxrpc_call_params *p,
                                         gfp_t gfp)
        __releases(&rx->sk.sk_lock.slock)
 {
@@ -208,18 +223,18 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        const void *here = __builtin_return_address(0);
        int ret;
 
-       _enter("%p,%lx", rx, user_call_ID);
+       _enter("%p,%lx", rx, p->user_call_ID);
 
-       call = rxrpc_alloc_client_call(srx, gfp);
+       call = rxrpc_alloc_client_call(rx, srx, gfp);
        if (IS_ERR(call)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }
 
-       call->tx_total_len = tx_total_len;
+       call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
-                        here, (const void *)user_call_ID);
+                        here, (const void *)p->user_call_ID);
 
        /* We need to protect a partially set up call against the user as we
         * will be acting outside the socket lock.
@@ -235,16 +250,16 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);
 
-               if (user_call_ID < xcall->user_call_ID)
+               if (p->user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
-               else if (user_call_ID > xcall->user_call_ID)
+               else if (p->user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto error_dup_user_ID;
        }
 
        rcu_assign_pointer(call->socket, rx);
-       call->user_call_ID = user_call_ID;
+       call->user_call_ID = p->user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
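
The call_object.c hunks above also give kernel-socket calls their own lock class for call->user_mutex, so lockdep does not conflate that mutex with the same mutex taken via sys_sendmsg() on user sockets. A minimal sketch of the lockdep_set_class() idiom under that assumption, with a hypothetical bar_object type in place of rxrpc's structures:

/* Illustrative only: re-classing a lock for one usage context. */
#include <linux/lockdep.h>
#include <linux/mutex.h>

/* One key per locking context lockdep should track separately;
 * keys must have static storage duration.
 */
static struct lock_class_key bar_kernel_user_mutex_key;

struct bar_object {
	struct mutex user_mutex;
	bool kernel_side;
};

static void bar_object_init(struct bar_object *bar, bool kernel_side)
{
	mutex_init(&bar->user_mutex);
	bar->kernel_side = kernel_side;

	/* Kernel-side users get their own class so dependencies recorded
	 * against user-side holders do not produce false positives here.
	 */
	if (kernel_side)
		lockdep_set_class(&bar->user_mutex,
				  &bar_kernel_user_mutex_key);
}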
index 5f9624bd311c6882bc6f751b393b4904222f5667..7f74ca3059f8f45a70dd1960335415eee343b003 100644 (file)
@@ -85,8 +85,8 @@
 
 __read_mostly unsigned int rxrpc_max_client_connections = 1000;
 __read_mostly unsigned int rxrpc_reap_client_connections = 900;
-__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
-__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 
 /*
  * We use machine-unique IDs for our client connections.
@@ -554,6 +554,11 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
 
        trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
 
+       /* Cancel the final ACK on the previous call if it hasn't been sent yet
+        * as the DATA packet will implicitly ACK it.
+        */
+       clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+
        write_lock_bh(&call->state_lock);
        if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
                call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
@@ -686,7 +691,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
 
        _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
 
-       rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+       rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
        rxrpc_cull_active_client_conns(rxnet);
 
        ret = rxrpc_get_client_conn(call, cp, srx, gfp);
@@ -751,6 +756,18 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
        }
 }
 
+/*
+ * Set the reap timer.
+ */
+static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+{
+       unsigned long now = jiffies;
+       unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
+
+       if (rxnet->live)
+               timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+}
+
 /*
  * Disconnect a client call.
  */
@@ -813,6 +830,19 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
                goto out_2;
        }
 
+       /* Schedule the final ACK to be transmitted in a short while so that it
+        * can be skipped if we find a follow-on call.  The first DATA packet
+        * of the follow on call will implicitly ACK this call.
+        */
+       if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+               unsigned long final_ack_at = jiffies + 2;
+
+               WRITE_ONCE(chan->final_ack_at, final_ack_at);
+               smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
+               set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+               rxrpc_reduce_conn_timer(conn, final_ack_at);
+       }
+
        /* Things are more complex and we need the cache lock.  We might be
         * able to simply idle the conn or it might now be lurking on the wait
         * list.  It might even get moved back to the active list whilst we're
@@ -878,9 +908,7 @@ idle_connection:
                list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
                if (rxnet->idle_client_conns.next == &conn->cache_link &&
                    !rxnet->kill_all_client_conns)
-                       queue_delayed_work(rxrpc_workqueue,
-                                          &rxnet->client_conn_reaper,
-                                          rxrpc_conn_idle_client_expiry);
+                       rxrpc_set_client_reap_timer(rxnet);
        } else {
                trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
                conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
@@ -1018,8 +1046,7 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_net *rxnet =
-               container_of(to_delayed_work(work),
-                            struct rxrpc_net, client_conn_reaper);
+               container_of(work, struct rxrpc_net, client_conn_reaper);
        unsigned long expiry, conn_expires_at, now;
        unsigned int nr_conns;
        bool did_discard = false;
@@ -1061,6 +1088,8 @@ next:
                expiry = rxrpc_conn_idle_client_expiry;
                if (nr_conns > rxrpc_reap_client_connections)
                        expiry = rxrpc_conn_idle_client_fast_expiry;
+               if (conn->params.local->service_closed)
+                       expiry = rxrpc_closed_conn_expiry * HZ;
 
                conn_expires_at = conn->idle_timestamp + expiry;
 
@@ -1096,9 +1125,8 @@ not_yet_expired:
         */
        _debug("not yet");
        if (!rxnet->kill_all_client_conns)
-               queue_delayed_work(rxrpc_workqueue,
-                                  &rxnet->client_conn_reaper,
-                                  conn_expires_at - now);
+               timer_reduce(&rxnet->client_conn_reap_timer,
+                            conn_expires_at);
 
 out:
        spin_unlock(&rxnet->client_conn_cache_lock);
@@ -1118,9 +1146,9 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
        rxnet->kill_all_client_conns = true;
        spin_unlock(&rxnet->client_conn_cache_lock);
 
-       cancel_delayed_work(&rxnet->client_conn_reaper);
+       del_timer_sync(&rxnet->client_conn_reap_timer);
 
-       if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
+       if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
                _debug("destroy: queue failed");
 
        _leave("");
index 59a51a56e7c88e78d69dfc0184ca1230ca797a2e..9e9a8db1bc9cd0f1afd3efd7e9e26c4d2890a7d3 100644 (file)
  * Retransmit terminal ACK or ABORT of the previous call.
  */
 static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
-                                      struct sk_buff *skb)
+                                      struct sk_buff *skb,
+                                      unsigned int channel)
 {
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
        struct rxrpc_channel *chan;
        struct msghdr msg;
        struct kvec iov;
@@ -48,7 +49,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 
        _enter("%d", conn->debug_id);
 
-       chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK];
+       chan = &conn->channels[channel];
 
        /* If the last call got moved on whilst we were waiting to run, just
         * ignore this packet.
@@ -56,7 +57,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        call_id = READ_ONCE(chan->last_call);
        /* Sync with __rxrpc_disconnect_call() */
        smp_rmb();
-       if (call_id != sp->hdr.callNumber)
+       if (skb && call_id != sp->hdr.callNumber)
                return;
 
        msg.msg_name    = &conn->params.peer->srx.transport;
@@ -65,9 +66,9 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;
 
-       pkt.whdr.epoch          = htonl(sp->hdr.epoch);
-       pkt.whdr.cid            = htonl(sp->hdr.cid);
-       pkt.whdr.callNumber     = htonl(sp->hdr.callNumber);
+       pkt.whdr.epoch          = htonl(conn->proto.epoch);
+       pkt.whdr.cid            = htonl(conn->proto.cid);
+       pkt.whdr.callNumber     = htonl(call_id);
        pkt.whdr.seq            = 0;
        pkt.whdr.type           = chan->last_type;
        pkt.whdr.flags          = conn->out_clientflag;
@@ -87,11 +88,11 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                mtu = conn->params.peer->if_mtu;
                mtu -= conn->params.peer->hdrsize;
                pkt.ack.bufferSpace     = 0;
-               pkt.ack.maxSkew         = htons(skb->priority);
-               pkt.ack.firstPacket     = htonl(chan->last_seq);
-               pkt.ack.previousPacket  = htonl(chan->last_seq - 1);
-               pkt.ack.serial          = htonl(sp->hdr.serial);
-               pkt.ack.reason          = RXRPC_ACK_DUPLICATE;
+               pkt.ack.maxSkew         = htons(skb ? skb->priority : 0);
+               pkt.ack.firstPacket     = htonl(chan->last_seq + 1);
+               pkt.ack.previousPacket  = htonl(chan->last_seq);
+               pkt.ack.serial          = htonl(skb ? sp->hdr.serial : 0);
+               pkt.ack.reason          = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
                pkt.ack.nAcks           = 0;
                pkt.info.rxMTU          = htonl(rxrpc_rx_mtu);
                pkt.info.maxMTU         = htonl(mtu);
@@ -272,7 +273,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
        case RXRPC_PACKET_TYPE_ACK:
-               rxrpc_conn_retransmit_call(conn, skb);
+               rxrpc_conn_retransmit_call(conn, skb,
+                                          sp->hdr.cid & RXRPC_CHANNELMASK);
                return 0;
 
        case RXRPC_PACKET_TYPE_BUSY:
@@ -378,6 +380,48 @@ abort:
        _leave(" [aborted]");
 }
 
+/*
+ * Process delayed final ACKs that we haven't subsumed into a subsequent call.
+ */
+static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
+{
+       unsigned long j = jiffies, next_j;
+       unsigned int channel;
+       bool set;
+
+again:
+       next_j = j + LONG_MAX;
+       set = false;
+       for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
+               struct rxrpc_channel *chan = &conn->channels[channel];
+               unsigned long ack_at;
+
+               if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
+                       continue;
+
+               smp_rmb(); /* vs rxrpc_disconnect_client_call */
+               ack_at = READ_ONCE(chan->final_ack_at);
+
+               if (time_before(j, ack_at)) {
+                       if (time_before(ack_at, next_j)) {
+                               next_j = ack_at;
+                               set = true;
+                       }
+                       continue;
+               }
+
+               if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
+                                      &conn->flags))
+                       rxrpc_conn_retransmit_call(conn, NULL, channel);
+       }
+
+       j = jiffies;
+       if (time_before_eq(next_j, j))
+               goto again;
+       if (set)
+               rxrpc_reduce_conn_timer(conn, next_j);
+}
+
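For clarity, the read barrier in the loop above pairs with the write side added to rxrpc_disconnect_client_call() earlier in this patch; condensing the two hunks:

	/* Writer: rxrpc_disconnect_client_call() */
	WRITE_ONCE(chan->final_ack_at, final_ack_at);
	smp_wmb();		/* publish the time before the flag */
	set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);

	/* Reader: rxrpc_process_delayed_final_acks() */
	if (test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags)) {
		smp_rmb();	/* pairs with the smp_wmb() above */
		ack_at = READ_ONCE(chan->final_ack_at);
	}

so an observed flag bit always implies a valid final_ack_at value.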
 /*
  * connection-level event processor
  */
@@ -394,6 +438,10 @@ void rxrpc_process_connection(struct work_struct *work)
        if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
                rxrpc_secure_connection(conn);
 
+       /* Process delayed ACKs whose time has come. */
+       if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+               rxrpc_process_delayed_final_acks(conn);
+
        /* go through the conn-level event packets, releasing the ref on this
         * connection that each one has when we've finished with it */
        while ((skb = skb_dequeue(&conn->rx_queue))) {
index fe575798592fec5945148694b54de90a3d78c3f6..1aad04a32d5e203ab17928e2247275c8b01c954d 100644 (file)
 /*
  * Time till a connection expires after last use (in seconds).
  */
-unsigned int rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
 
 static void rxrpc_destroy_connection(struct rcu_head *);
 
+static void rxrpc_connection_timer(struct timer_list *timer)
+{
+       struct rxrpc_connection *conn =
+               container_of(timer, struct rxrpc_connection, timer);
+
+       rxrpc_queue_conn(conn);
+}
+
 /*
  * allocate a new connection
  */
@@ -38,6 +47,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
                INIT_LIST_HEAD(&conn->cache_link);
                spin_lock_init(&conn->channel_lock);
                INIT_LIST_HEAD(&conn->waiting_calls);
+               timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
                INIT_WORK(&conn->processor, &rxrpc_process_connection);
                INIT_LIST_HEAD(&conn->proc_link);
                INIT_LIST_HEAD(&conn->link);
@@ -300,22 +310,30 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
        return conn;
 }
 
+/*
+ * Set the service connection reap timer.
+ */
+static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
+                                        unsigned long reap_at)
+{
+       if (rxnet->live)
+               timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
+}
+
 /*
  * Release a service connection
  */
 void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
-       struct rxrpc_net *rxnet;
        const void *here = __builtin_return_address(0);
        int n;
 
        n = atomic_dec_return(&conn->usage);
        trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
        ASSERTCMP(n, >=, 0);
-       if (n == 0) {
-               rxnet = conn->params.local->rxnet;
-               rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
-       }
+       if (n == 1)
+               rxrpc_set_service_reap_timer(conn->params.local->rxnet,
+                                            jiffies + rxrpc_connection_expiry);
 }
 
 /*
@@ -332,6 +350,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 
        _net("DESTROY CONN %d", conn->debug_id);
 
+       del_timer_sync(&conn->timer);
        rxrpc_purge_queue(&conn->rx_queue);
 
        conn->security->clear(conn);
@@ -351,17 +370,15 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 {
        struct rxrpc_connection *conn, *_p;
        struct rxrpc_net *rxnet =
-               container_of(to_delayed_work(work),
-                            struct rxrpc_net, service_conn_reaper);
-       unsigned long reap_older_than, earliest, idle_timestamp, now;
+               container_of(work, struct rxrpc_net, service_conn_reaper);
+       unsigned long expire_at, earliest, idle_timestamp, now;
 
        LIST_HEAD(graveyard);
 
        _enter("");
 
        now = jiffies;
-       reap_older_than = now - rxrpc_connection_expiry * HZ;
-       earliest = ULONG_MAX;
+       earliest = now + MAX_JIFFY_OFFSET;
 
        write_lock(&rxnet->conn_lock);
        list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
@@ -371,15 +388,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
                if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
                        continue;
 
-               idle_timestamp = READ_ONCE(conn->idle_timestamp);
-               _debug("reap CONN %d { u=%d,t=%ld }",
-                      conn->debug_id, atomic_read(&conn->usage),
-                      (long)reap_older_than - (long)idle_timestamp);
-
-               if (time_after(idle_timestamp, reap_older_than)) {
-                       if (time_before(idle_timestamp, earliest))
-                               earliest = idle_timestamp;
-                       continue;
+               if (rxnet->live) {
+                       idle_timestamp = READ_ONCE(conn->idle_timestamp);
+                       expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
+                       if (conn->params.local->service_closed)
+                               expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
+
+                       _debug("reap CONN %d { u=%d,t=%ld }",
+                              conn->debug_id, atomic_read(&conn->usage),
+                              (long)expire_at - (long)now);
+
+                       if (time_before(now, expire_at)) {
+                               if (time_before(expire_at, earliest))
+                                       earliest = expire_at;
+                               continue;
+                       }
                }
 
                /* The usage count sits at 1 whilst the object is unused on the
@@ -387,6 +410,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
                 */
                if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
                        continue;
+               trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
 
                if (rxrpc_conn_is_client(conn))
                        BUG();
@@ -397,11 +421,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
        }
        write_unlock(&rxnet->conn_lock);
 
-       if (earliest != ULONG_MAX) {
-               _debug("reschedule reaper %ld", (long) earliest - now);
+       if (earliest != now + MAX_JIFFY_OFFSET) {
+               _debug("reschedule reaper %ld", (long)earliest - (long)now);
                ASSERT(time_after(earliest, now));
-               rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
-                                        earliest - now);
+               rxrpc_set_service_reap_timer(rxnet, earliest);
        }
 
        while (!list_empty(&graveyard)) {
@@ -429,9 +452,8 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 
        rxrpc_destroy_all_client_connections(rxnet);
 
-       rxrpc_connection_expiry = 0;
-       cancel_delayed_work(&rxnet->client_conn_reaper);
-       rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
+       del_timer_sync(&rxnet->service_conn_reap_timer);
+       rxrpc_queue_work(&rxnet->service_conn_reaper);
        flush_workqueue(rxrpc_workqueue);
 
        write_lock(&rxnet->conn_lock);
index 1b592073ec960bb5eaac103f8717711045e49b35..23a5e61d8f79a01622c29de07061fcff94a14f3c 100644 (file)
@@ -318,16 +318,18 @@ bad_state:
 static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 {
        struct rxrpc_ack_summary summary = { 0 };
+       unsigned long now, timo;
        rxrpc_seq_t top = READ_ONCE(call->tx_top);
 
        if (call->ackr_reason) {
                spin_lock_bh(&call->lock);
                call->ackr_reason = 0;
-               call->resend_at = call->expire_at;
-               call->ack_at = call->expire_at;
                spin_unlock_bh(&call->lock);
-               rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
-                               ktime_get_real());
+               now = jiffies;
+               timo = now + MAX_JIFFY_OFFSET;
+               WRITE_ONCE(call->resend_at, timo);
+               WRITE_ONCE(call->ack_at, timo);
+               trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
        }
 
        if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
@@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
        if (state >= RXRPC_CALL_COMPLETE)
                return;
 
+       if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+               unsigned long timo = READ_ONCE(call->next_req_timo);
+               unsigned long now, expect_req_by;
+
+               if (timo) {
+                       now = jiffies;
+                       expect_req_by = now + timo;
+                       WRITE_ONCE(call->expect_req_by, expect_req_by);
+                       rxrpc_reduce_call_timer(call, expect_req_by, now,
+                                               rxrpc_timer_set_for_idle);
+               }
+       }
+
        /* Received data implicitly ACKs all of the request packets we sent
         * when we're acting as a client.
         */
@@ -615,6 +630,43 @@ found:
                           orig_serial, ack_serial, sent_at, resp_time);
 }
 
+/*
+ * Process the response to a ping that we sent to find out if we lost an ACK.
+ *
+ * If we got back a ping response that indicates a lower tx_top than what we
+ * had at the time of the ping transmission, we adjudge all the DATA packets
+ * sent between the response tx_top and the ping-time tx_top to have been lost.
+ */
+static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
+{
+       rxrpc_seq_t top, bottom, seq;
+       bool resend = false;
+
+       spin_lock_bh(&call->lock);
+
+       bottom = call->tx_hard_ack + 1;
+       top = call->acks_lost_top;
+       if (before(bottom, top)) {
+               for (seq = bottom; before_eq(seq, top); seq++) {
+                       int ix = seq & RXRPC_RXTX_BUFF_MASK;
+                       u8 annotation = call->rxtx_annotations[ix];
+                       u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
+
+                       if (anno_type != RXRPC_TX_ANNO_UNACK)
+                               continue;
+                       annotation &= ~RXRPC_TX_ANNO_MASK;
+                       annotation |= RXRPC_TX_ANNO_RETRANS;
+                       call->rxtx_annotations[ix] = annotation;
+                       resend = true;
+               }
+       }
+
+       spin_unlock_bh(&call->lock);
+
+       if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+               rxrpc_queue_call(call);
+}
+
 /*
  * Process a ping response.
  */
@@ -630,6 +682,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
        smp_rmb();
        ping_serial = call->ping_serial;
 
+       if (orig_serial == call->acks_lost_ping)
+               rxrpc_input_check_for_lost_ack(call);
+
        if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
            before(orig_serial, ping_serial))
                return;
@@ -908,9 +963,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
                                    struct sk_buff *skb, u16 skew)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       unsigned long timo;
 
        _enter("%p,%p", call, skb);
 
+       timo = READ_ONCE(call->next_rx_timo);
+       if (timo) {
+               unsigned long now = jiffies, expect_rx_by;
+
+               expect_rx_by = jiffies + timo;
+               WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+               rxrpc_reduce_call_timer(call, expect_rx_by, now,
+                                       rxrpc_timer_set_for_normal);
+       }
+
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
                rxrpc_input_data(call, skb, skew);
index 1a2d4b1120649ad4055b138ec556dad854bf37a6..c1d9e7fd7448dedb9ba7952c0a5f1b701bdb62cb 100644 (file)
  */
 unsigned int rxrpc_max_backlog __read_mostly = 10;
 
-/*
- * Maximum lifetime of a call (in mx).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * 1000;
-
 /*
  * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in ms).
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
  */
-unsigned int rxrpc_requested_ack_delay = 1;
+unsigned long rxrpc_requested_ack_delay = 1;
 
 /*
- * How long to wait before scheduling an ACK with subtype DELAY (in ms).
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
  *
  * We use this when we've received new data packets.  If those packets aren't
  * all consumed within this time we will send a DELAY ACK if an ACK was not
  * requested to let the sender know it doesn't need to resend.
  */
-unsigned int rxrpc_soft_ack_delay = 1 * 1000;
+unsigned long rxrpc_soft_ack_delay = HZ;
 
 /*
- * How long to wait before scheduling an ACK with subtype IDLE (in ms).
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
  *
  * We use this when we've consumed some previously soft-ACK'd packets when
  * further packets aren't immediately received to decide when to send an IDLE
  * ACK let the other end know that it can free up its Tx buffer space.
  */
-unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
+unsigned long rxrpc_idle_ack_delay = HZ / 2;
 
 /*
  * Receive window size in packets.  This indicates the maximum number of
@@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4;
 /*
  * Time till packet resend (in milliseconds).
  */
-unsigned int rxrpc_resend_timeout = 4 * 1000;
+unsigned long rxrpc_resend_timeout = 4 * HZ;
 
 const s8 rxrpc_ack_priority[] = {
        [0]                             = 0,
index 7edceb8522f5b3b094cefc61d219e1cfa8df279e..f18c9248e0d4c6108cb0ae82cc408e70d4aacbdb 100644 (file)
 
 unsigned int rxrpc_net_id;
 
+static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+{
+       struct rxrpc_net *rxnet =
+               container_of(timer, struct rxrpc_net, client_conn_reap_timer);
+
+       if (rxnet->live)
+               rxrpc_queue_work(&rxnet->client_conn_reaper);
+}
+
+static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
+{
+       struct rxrpc_net *rxnet =
+               container_of(timer, struct rxrpc_net, service_conn_reap_timer);
+
+       if (rxnet->live)
+               rxrpc_queue_work(&rxnet->service_conn_reaper);
+}
+
 /*
  * Initialise a per-network namespace record.
  */
@@ -22,6 +40,7 @@ static __net_init int rxrpc_init_net(struct net *net)
        struct rxrpc_net *rxnet = rxrpc_net(net);
        int ret;
 
+       rxnet->live = true;
        get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
        rxnet->epoch |= RXRPC_RANDOM_EPOCH;
 
@@ -31,8 +50,10 @@ static __net_init int rxrpc_init_net(struct net *net)
        INIT_LIST_HEAD(&rxnet->conn_proc_list);
        INIT_LIST_HEAD(&rxnet->service_conns);
        rwlock_init(&rxnet->conn_lock);
-       INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
-                         rxrpc_service_connection_reaper);
+       INIT_WORK(&rxnet->service_conn_reaper,
+                 rxrpc_service_connection_reaper);
+       timer_setup(&rxnet->service_conn_reap_timer,
+                   rxrpc_service_conn_reap_timeout, 0);
 
        rxnet->nr_client_conns = 0;
        rxnet->nr_active_client_conns = 0;
@@ -42,8 +63,10 @@ static __net_init int rxrpc_init_net(struct net *net)
        INIT_LIST_HEAD(&rxnet->waiting_client_conns);
        INIT_LIST_HEAD(&rxnet->active_client_conns);
        INIT_LIST_HEAD(&rxnet->idle_client_conns);
-       INIT_DELAYED_WORK(&rxnet->client_conn_reaper,
-                         rxrpc_discard_expired_client_conns);
+       INIT_WORK(&rxnet->client_conn_reaper,
+                 rxrpc_discard_expired_client_conns);
+       timer_setup(&rxnet->client_conn_reap_timer,
+                   rxrpc_client_conn_reap_timeout, 0);
 
        INIT_LIST_HEAD(&rxnet->local_endpoints);
        mutex_init(&rxnet->local_mutex);
@@ -60,6 +83,7 @@ static __net_init int rxrpc_init_net(struct net *net)
        return 0;
 
 err_proc:
+       rxnet->live = false;
        return ret;
 }
 
@@ -70,6 +94,7 @@ static __net_exit void rxrpc_exit_net(struct net *net)
 {
        struct rxrpc_net *rxnet = rxrpc_net(net);
 
+       rxnet->live = false;
        rxrpc_destroy_all_calls(rxnet);
        rxrpc_destroy_all_connections(rxnet);
        rxrpc_destroy_all_locals(rxnet);
index f47659c7b224ef910fcaa7f8c9271d449ae3eada..42410e910affbdf39691a0cb080c053b1e8aa661 100644 (file)
@@ -32,6 +32,24 @@ struct rxrpc_abort_buffer {
        __be32 abort_code;
 };
 
+/*
+ * Arrange for a keepalive ping a certain time after we last transmitted.  This
+ * lets the far side know we're still interested in this call and helps keep
+ * the route through any intervening firewall open.
+ *
+ * Receiving a response to the ping will prevent the ->expect_rx_by timer from
+ * expiring.
+ */
+static void rxrpc_set_keepalive(struct rxrpc_call *call)
+{
+       unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+
+       keepalive_at += now;
+       WRITE_ONCE(call->keepalive_at, keepalive_at);
+       rxrpc_reduce_call_timer(call, keepalive_at, now,
+                               rxrpc_timer_set_for_keepalive);
+}
+
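The keepalive time is simply keepalive_at = jiffies + next_rx_timo / 6, i.e. one sixth of the receive-inactivity timeout: if, say, a call's expect_rx_by window is 30 seconds (an illustrative value only; next_rx_timo is whatever the socket configured via RXRPC_SET_CALL_TIMEOUT), a keepalive ping is scheduled roughly 5 seconds after the last transmission, well before the peer could time the call out.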
 /*
  * Fill out an ACK packet.
  */
@@ -95,7 +113,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 /*
  * Send an ACK call packet.
  */
-int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
+int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
+                         rxrpc_serial_t *_serial)
 {
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_ack_buffer *pkt;
@@ -165,6 +184,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
                           ntohl(pkt->ack.firstPacket),
                           ntohl(pkt->ack.serial),
                           pkt->ack.reason, pkt->ack.nAcks);
+       if (_serial)
+               *_serial = serial;
 
        if (ping) {
                call->ping_serial = serial;
@@ -202,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
                                call->ackr_seen = top;
                        spin_unlock_bh(&call->lock);
                }
+
+               rxrpc_set_keepalive(call);
        }
 
 out:
@@ -323,7 +346,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
         * ACKs if a DATA packet appears to have been lost.
         */
        if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
-           (retrans ||
+           (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
+            retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
             (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
             ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
@@ -370,8 +394,23 @@ done:
                if (whdr.flags & RXRPC_REQUEST_ACK) {
                        call->peer->rtt_last_req = now;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
+                       if (call->peer->rtt_usage > 1) {
+                               unsigned long nowj = jiffies, ack_lost_at;
+
+                               ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
+                               if (ack_lost_at < 1)
+                                       ack_lost_at = 1;
+
+                               ack_lost_at += nowj;
+                               WRITE_ONCE(call->ack_lost_at, ack_lost_at);
+                               rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
+                                                       rxrpc_timer_set_for_lost_ack);
+                       }
                }
        }
+
+       rxrpc_set_keepalive(call);
+
        _leave(" = %d [%u]", ret, call->peer->maxdata);
        return ret;
 
index 8510a98b87e1e224b5ba524f2fe75a21d9b41370..cc21e8db25b0b730bbe66dbee6d358177df9ae09 100644 (file)
@@ -144,11 +144,13 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
        trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
        ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
 
+#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
        if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
                rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
                                  rxrpc_propose_ack_terminal_ack);
-               rxrpc_send_ack_packet(call, false);
+               rxrpc_send_ack_packet(call, false, NULL);
        }
+#endif
 
        write_lock_bh(&call->state_lock);
 
@@ -161,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
        case RXRPC_CALL_SERVER_RECV_REQUEST:
                call->tx_phase = true;
                call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
-               call->ack_at = call->expire_at;
+               call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
                write_unlock_bh(&call->state_lock);
                rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
                                  rxrpc_propose_ack_processing_op);
@@ -217,10 +219,10 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
                    after_eq(top, call->ackr_seen + 2) ||
                    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
                        rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
-                                         true, false,
+                                         true, true,
                                          rxrpc_propose_ack_rotate_rx);
-               if (call->ackr_reason)
-                       rxrpc_send_ack_packet(call, false);
+               if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
+                       rxrpc_send_ack_packet(call, false, NULL);
        }
 }
 
index 7d2595582c094cf4b30a93e1148227d6480c202e..a1c53ac066a10bda169b0222b6d6177066c6dca9 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-enum rxrpc_command {
-       RXRPC_CMD_SEND_DATA,            /* send data message */
-       RXRPC_CMD_SEND_ABORT,           /* request abort generation */
-       RXRPC_CMD_ACCEPT,               /* [server] accept incoming call */
-       RXRPC_CMD_REJECT_BUSY,          /* [server] reject a call as busy */
-};
-
-struct rxrpc_send_params {
-       s64                     tx_total_len;   /* Total Tx data length (if send data) */
-       unsigned long           user_call_ID;   /* User's call ID */
-       u32                     abort_code;     /* Abort code to Tx (if abort) */
-       enum rxrpc_command      command : 8;    /* The command to implement */
-       bool                    exclusive;      /* Shared or exclusive call */
-       bool                    upgrade;        /* If the connection is upgradeable */
-};
-
 /*
  * Wait for space to appear in the Tx queue or a signal to occur.
  */
@@ -174,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                               rxrpc_notify_end_tx_t notify_end_tx)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       unsigned long now;
        rxrpc_seq_t seq = sp->hdr.seq;
        int ret, ix;
        u8 annotation = RXRPC_TX_ANNO_UNACK;
@@ -213,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                        break;
                case RXRPC_CALL_SERVER_ACK_REQUEST:
                        call->state = RXRPC_CALL_SERVER_SEND_REPLY;
-                       call->ack_at = call->expire_at;
+                       now = jiffies;
+                       WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
                        if (call->ackr_reason == RXRPC_ACK_DELAY)
                                call->ackr_reason = 0;
-                       __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
-                                         ktime_get_real());
+                       trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
                        if (!last)
                                break;
                        /* Fall through */
@@ -239,14 +224,19 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                _debug("need instant resend %d", ret);
                rxrpc_instant_resend(call, ix);
        } else {
-               ktime_t now = ktime_get_real(), resend_at;
-
-               resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
-
-               if (ktime_before(resend_at, call->resend_at)) {
-                       call->resend_at = resend_at;
-                       rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
-               }
+               unsigned long now = jiffies, resend_at;
+
+               if (call->peer->rtt_usage > 1)
+                       resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
+               else
+                       resend_at = rxrpc_resend_timeout;
+               if (resend_at < 1)
+                       resend_at = 1;
+
+               resend_at = now + rxrpc_resend_timeout;
+               WRITE_ONCE(call->resend_at, resend_at);
+               rxrpc_reduce_call_timer(call, resend_at, now,
+                                       rxrpc_timer_set_for_send);
        }
 
        rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
@@ -295,7 +285,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
        do {
                /* Check to see if there's a ping ACK to reply to. */
                if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
-                       rxrpc_send_ack_packet(call, false);
+                       rxrpc_send_ack_packet(call, false, NULL);
 
                if (!skb) {
                        size_t size, chunk, max, space;
@@ -480,11 +470,11 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
                        if (msg->msg_flags & MSG_CMSG_COMPAT) {
                                if (len != sizeof(u32))
                                        return -EINVAL;
-                               p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
+                               p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
                        } else {
                                if (len != sizeof(unsigned long))
                                        return -EINVAL;
-                               p->user_call_ID = *(unsigned long *)
+                               p->call.user_call_ID = *(unsigned long *)
                                        CMSG_DATA(cmsg);
                        }
                        got_user_ID = true;
@@ -522,11 +512,24 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
                        break;
 
                case RXRPC_TX_LENGTH:
-                       if (p->tx_total_len != -1 || len != sizeof(__s64))
+                       if (p->call.tx_total_len != -1 || len != sizeof(__s64))
+                               return -EINVAL;
+                       p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
+                       if (p->call.tx_total_len < 0)
                                return -EINVAL;
-                       p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
-                       if (p->tx_total_len < 0)
+                       break;
+
+               case RXRPC_SET_CALL_TIMEOUT:
+                       if (len & 3 || len < 4 || len > 12)
                                return -EINVAL;
+                       memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
+                       p->call.nr_timeouts = len / 4;
+                       if (p->call.timeouts.hard > INT_MAX / HZ)
+                               return -ERANGE;
+                       if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
+                               return -ERANGE;
+                       if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
+                               return -ERANGE;
                        break;
 
                default:
@@ -536,7 +539,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
 
        if (!got_user_ID)
                return -EINVAL;
-       if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
+       if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
                return -EINVAL;
        _leave(" = 0");
        return 0;
@@ -576,8 +579,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
        cp.exclusive            = rx->exclusive | p->exclusive;
        cp.upgrade              = p->upgrade;
        cp.service_id           = srx->srx_service;
-       call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
-                                    p->tx_total_len, GFP_KERNEL);
+       call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
        /* The socket is now unlocked */
 
        _leave(" = %p\n", call);
@@ -594,15 +596,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 {
        enum rxrpc_call_state state;
        struct rxrpc_call *call;
+       unsigned long now, j;
        int ret;
 
        struct rxrpc_send_params p = {
-               .tx_total_len   = -1,
-               .user_call_ID   = 0,
-               .abort_code     = 0,
-               .command        = RXRPC_CMD_SEND_DATA,
-               .exclusive      = false,
-               .upgrade        = true,
+               .call.tx_total_len      = -1,
+               .call.user_call_ID      = 0,
+               .call.nr_timeouts       = 0,
+               .abort_code             = 0,
+               .command                = RXRPC_CMD_SEND_DATA,
+               .exclusive              = false,
+               .upgrade                = false,
        };
 
        _enter("");
@@ -615,15 +619,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                ret = -EINVAL;
                if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
                        goto error_release_sock;
-               call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
+               call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
                /* The socket is now unlocked. */
                if (IS_ERR(call))
                        return PTR_ERR(call);
-               rxrpc_put_call(call, rxrpc_call_put);
-               return 0;
+               ret = 0;
+               goto out_put_unlock;
        }
 
-       call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
+       call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
        if (!call) {
                ret = -EBADSLT;
                if (p.command != RXRPC_CMD_SEND_DATA)
@@ -653,14 +657,39 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                        goto error_put;
                }
 
-               if (p.tx_total_len != -1) {
+               if (p.call.tx_total_len != -1) {
                        ret = -EINVAL;
                        if (call->tx_total_len != -1 ||
                            call->tx_pending ||
                            call->tx_top != 0)
                                goto error_put;
-                       call->tx_total_len = p.tx_total_len;
+                       call->tx_total_len = p.call.tx_total_len;
+               }
+       }
+
+       switch (p.call.nr_timeouts) {
+       case 3:
+               j = msecs_to_jiffies(p.call.timeouts.normal);
+               if (p.call.timeouts.normal > 0 && j == 0)
+                       j = 1;
+               WRITE_ONCE(call->next_rx_timo, j);
+               /* Fall through */
+       case 2:
+               j = msecs_to_jiffies(p.call.timeouts.idle);
+               if (p.call.timeouts.idle > 0 && j == 0)
+                       j = 1;
+               WRITE_ONCE(call->next_req_timo, j);
+               /* Fall through */
+       case 1:
+               if (p.call.timeouts.hard > 0) {
+                       j = msecs_to_jiffies(p.call.timeouts.hard);
+                       now = jiffies;
+                       j += now;
+                       WRITE_ONCE(call->expect_term_by, j);
+                       rxrpc_reduce_call_timer(call, j, now,
+                                               rxrpc_timer_set_for_hard);
                }
+               break;
        }
 
        state = READ_ONCE(call->state);
@@ -689,6 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                ret = rxrpc_send_data(rx, call, msg, len, NULL);
        }
 
+out_put_unlock:
        mutex_unlock(&call->user_mutex);
 error_put:
        rxrpc_put_call(call, rxrpc_call_put);
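For reference, the new RXRPC_SET_CALL_TIMEOUT control message handled above carries one to three 32-bit millisecond values in the order {hard, idle, normal}, as the length check and the nr_timeouts switch imply.  A minimal userspace sketch (illustrative only; the SOL_RXRPC fallback value and the wrapper name are assumptions, not part of this patch):

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/rxrpc.h>	/* RXRPC_SET_CALL_TIMEOUT */

	#ifndef SOL_RXRPC
	#define SOL_RXRPC 272		/* assumed to match the kernel's value */
	#endif

	/* Attach hard/idle/normal call timeouts (in ms) to a sendmsg() msghdr. */
	static void example_set_call_timeouts(struct msghdr *msg, void *cbuf,
					      size_t cbuflen)
	{
		unsigned int t[3] = { 30000, 5000, 10000 }; /* hard, idle, normal */
		struct cmsghdr *cmsg;

		msg->msg_control = cbuf;
		msg->msg_controllen = cbuflen;	/* must be >= CMSG_SPACE(sizeof(t)) */

		cmsg = CMSG_FIRSTHDR(msg);
		cmsg->cmsg_level = SOL_RXRPC;
		cmsg->cmsg_type  = RXRPC_SET_CALL_TIMEOUT;
		cmsg->cmsg_len   = CMSG_LEN(sizeof(t));	/* 12 bytes => all three */
		memcpy(CMSG_DATA(cmsg), t, sizeof(t));
	}

Passing only 4 or 8 bytes of data sets just the hard, or the hard and idle, timeouts respectively.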
index 34c706d2f79c695076f228facc4b378ce37296cf..4a7af7aff37d247f84bfdd250bbc592ae1bb67b2 100644 (file)
@@ -21,6 +21,8 @@ static const unsigned int four = 4;
 static const unsigned int thirtytwo = 32;
 static const unsigned int n_65535 = 65535;
 static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
+static const unsigned long one_jiffy = 1;
+static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
 
 /*
  * RxRPC operating parameters.
@@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
  * information on the individual parameters.
  */
 static struct ctl_table rxrpc_sysctl_table[] = {
-       /* Values measured in milliseconds */
+       /* Values measured in milliseconds but used in jiffies */
        {
                .procname       = "req_ack_delay",
                .data           = &rxrpc_requested_ack_delay,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&zero,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
        {
                .procname       = "soft_ack_delay",
                .data           = &rxrpc_soft_ack_delay,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
        {
                .procname       = "idle_ack_delay",
                .data           = &rxrpc_idle_ack_delay,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&one,
-       },
-       {
-               .procname       = "resend_timeout",
-               .data           = &rxrpc_resend_timeout,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
        {
                .procname       = "idle_conn_expiry",
                .data           = &rxrpc_conn_idle_client_expiry,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_ms_jiffies,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
        {
                .procname       = "idle_conn_fast_expiry",
                .data           = &rxrpc_conn_idle_client_fast_expiry,
-               .maxlen         = sizeof(unsigned int),
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_ms_jiffies,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
-
-       /* Values measured in seconds but used in jiffies */
        {
-               .procname       = "max_call_lifetime",
-               .data           = &rxrpc_max_call_lifetime,
-               .maxlen         = sizeof(unsigned int),
+               .procname       = "resend_timeout",
+               .data           = &rxrpc_resend_timeout,
+               .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-               .extra1         = (void *)&one,
+               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
+               .extra1         = (void *)&one_jiffy,
+               .extra2         = (void *)&max_jiffies,
        },
 
        /* Non-time values */
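With proc_doulongvec_ms_jiffies_minmax the time tunables above are still read and written in milliseconds through /proc/sys/net/rxrpc/, while the kernel now stores jiffies internally: writing 4000 to resend_timeout, for example, stores the jiffies equivalent of 4000 ms (4 * HZ), and values are bounded by the one_jiffy/max_jiffies limits supplied via extra1 and extra2.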
index 1c40caadcff959ba0c6cec6b8e32f7b459c42cfa..d836f998117b2417548b22a73940300405ce65b8 100644 (file)
@@ -229,6 +229,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
        const struct iphdr *iph;
        u16 ul;
 
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+               return 1;
+
        /*
         * Support both UDP and UDPLITE checksum algorithms, Don't use
         * udph->len to get the real length without any protocol check,
@@ -282,6 +285,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
        const struct ipv6hdr *ip6h;
        u16 ul;
 
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+               return 1;
+
        /*
         * Support both UDP and UDPLITE checksum algorithms, Don't use
         * udph->len to get the real length without any protocol check,
index ab255b421781b86d5b76dd1e67b8473e5a928af0..ddcf04b4ab43732c001869f70d63ea193768ebc3 100644 (file)
@@ -205,13 +205,14 @@ static void tcf_chain_head_change(struct tcf_chain *chain,
 
 static void tcf_chain_flush(struct tcf_chain *chain)
 {
-       struct tcf_proto *tp;
+       struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
 
        tcf_chain_head_change(chain, NULL);
-       while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
+       while (tp) {
                RCU_INIT_POINTER(chain->filter_chain, tp->next);
-               tcf_chain_put(chain);
                tcf_proto_destroy(tp);
+               tp = rtnl_dereference(chain->filter_chain);
+               tcf_chain_put(chain);
        }
 }
 
@@ -335,7 +336,8 @@ static void tcf_block_put_final(struct work_struct *work)
        struct tcf_chain *chain, *tmp;
 
        rtnl_lock();
-       /* Only chain 0 should be still here. */
+
+       /* At this point, all the chains should have refcnt == 1. */
        list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
                tcf_chain_put(chain);
        rtnl_unlock();
@@ -343,15 +345,21 @@ static void tcf_block_put_final(struct work_struct *work)
 }
 
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are now
- * destroyed in tc filter workqueue with RTNL lock, they can not race here.
+ * actions should be all removed after flushing.
  */
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
 {
-       struct tcf_chain *chain, *tmp;
+       struct tcf_chain *chain;
 
-       list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+       /* Hold a refcnt for all chains, except 0, so that they don't disappear
+        * while we are iterating.
+        */
+       list_for_each_entry(chain, &block->chain_list, list)
+               if (chain->index)
+                       tcf_chain_hold(chain);
+
+       list_for_each_entry(chain, &block->chain_list, list)
                tcf_chain_flush(chain);
 
        tcf_block_offload_unbind(block, q, ei);
index fb680dafac5a2e49515ab84ecc820c592bca4ae0..6fe798c2df1a5303cd61cd3ad53cd2f9385d16de 100644 (file)
@@ -258,11 +258,8 @@ static int cls_bpf_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
 {
-       tcf_exts_destroy(&prog->exts);
-       tcf_exts_put_net(&prog->exts);
-
        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
@@ -270,6 +267,14 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
 
        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
+}
+
+static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+{
+       tcf_exts_destroy(&prog->exts);
+       tcf_exts_put_net(&prog->exts);
+
+       cls_bpf_free_parms(prog);
        kfree(prog);
 }
 
@@ -382,15 +387,13 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
 {
        struct bpf_prog *fp;
        char *name = NULL;
+       bool skip_sw;
        u32 bpf_fd;
 
        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
+       skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;
 
-       if (gen_flags & TCA_CLS_FLAGS_SKIP_SW)
-               fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS,
-                                          qdisc_dev(tp->q));
-       else
-               fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
+       fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
        if (IS_ERR(fp))
                return PTR_ERR(fp);
 
@@ -516,12 +519,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                goto errout_idr;
 
        ret = cls_bpf_offload(tp, prog, oldprog);
-       if (ret) {
-               if (!oldprog)
-                       idr_remove_ext(&head->handle_idr, prog->handle);
-               __cls_bpf_delete_prog(prog);
-               return ret;
-       }
+       if (ret)
+               goto errout_parms;
 
        if (!tc_in_hw(prog->gen_flags))
                prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
@@ -539,6 +538,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
        *arg = prog;
        return 0;
 
+errout_parms:
+       cls_bpf_free_parms(prog);
 errout_idr:
        if (!oldprog)
                idr_remove_ext(&head->handle_idr, prog->handle);
index 6361be7881f108c03335e0245448682f960587d6..525eb3a6d625164e9193da65dd67795d6cdd3cc2 100644 (file)
@@ -1158,9 +1158,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
                return -EINVAL;
 
+       err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+       if (err)
+               goto put_rtab;
+
        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
-               goto put_rtab;
+               goto put_block;
 
        q->link.sibling = &q->link;
        q->link.common.classid = sch->handle;
@@ -1194,6 +1198,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        cbq_addprio(q, &q->link);
        return 0;
 
+put_block:
+       tcf_block_put(q->link.block);
+
 put_rtab:
        qdisc_put_rtab(q->link.R_tab);
        return err;
index 890f4a4564e71355329b7372a3769431ce62dc0b..09c1203c17119829d183fbdd0dfe9757460b863e 100644 (file)
@@ -724,6 +724,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        int i;
        int err;
 
+       q->sch = sch;
        timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
 
        err = tcf_block_get(&q->block, &q->filter_list, sch);
index a6dfa86c02016e3ff81f10f729a56e6673affc68..3b18085e3b10253f3f81be7a6747b50ef9357db2 100644 (file)
@@ -807,9 +807,10 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
                addr->v6.sin6_flowinfo = 0;
                addr->v6.sin6_port = sh->source;
                addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
-               if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
+               if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
                        addr->v6.sin6_scope_id = sctp_v6_skb_iif(skb);
-               }
+               else
+                       addr->v6.sin6_scope_id = 0;
        }
 
        *addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);
index f5172c21349bec865f3812ba63aeb93b6c4fbab9..6a38c250364980228f00d81c667668409843a499 100644 (file)
@@ -1499,6 +1499,7 @@ static __init int sctp_init(void)
        INIT_LIST_HEAD(&sctp_address_families);
        sctp_v4_pf_init();
        sctp_v6_pf_init();
+       sctp_sched_ops_init();
 
        status = register_pernet_subsys(&sctp_defaults_ops);
        if (status)
index 514465b03829b18c18ae3e890e0899138035bd7a..9bf575f2e8ed0888e0219a872e84018ada5064e0 100644 (file)
@@ -3594,8 +3594,8 @@ struct sctp_chunk *sctp_make_strreset_req(
                                        __u16 stream_num, __be16 *stream_list,
                                        bool out, bool in)
 {
+       __u16 stream_len = stream_num * sizeof(__u16);
        struct sctp_strreset_outreq outreq;
-       __u16 stream_len = stream_num * 2;
        struct sctp_strreset_inreq inreq;
        struct sctp_chunk *retval;
        __u16 outlen, inlen;
index b029757bea03edc389494882759325f60ca0ec44..014847e25648182dbf99d8fb095e094af76264bb 100644 (file)
@@ -84,8 +84,8 @@
 /* Forward declarations for internal helper functions. */
 static int sctp_writeable(struct sock *sk);
 static void sctp_wfree(struct sk_buff *skb);
-static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
-                               size_t msg_len);
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+                               size_t msg_len, struct sock **orig_sk);
 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
 static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -188,13 +188,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
                list_for_each_entry(chunk, &t->transmitted, transmitted_list)
                        cb(chunk);
 
-       list_for_each_entry(chunk, &q->retransmit, list)
+       list_for_each_entry(chunk, &q->retransmit, transmitted_list)
                cb(chunk);
 
-       list_for_each_entry(chunk, &q->sacked, list)
+       list_for_each_entry(chunk, &q->sacked, transmitted_list)
                cb(chunk);
 
-       list_for_each_entry(chunk, &q->abandoned, list)
+       list_for_each_entry(chunk, &q->abandoned, transmitted_list)
                cb(chunk);
 
        list_for_each_entry(chunk, &q->out_chunk_list, list)
@@ -1970,9 +1970,16 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        if (!sctp_wspace(asoc)) {
-               err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
-               if (err)
+               /* sk can be changed by peel off when waiting for buf. */
+               err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
+               if (err) {
+                       if (err == -ESRCH) {
+                               /* asoc is already dead. */
+                               new_asoc = NULL;
+                               err = -EPIPE;
+                       }
                        goto out_free;
+               }
        }
 
        /* If an address is passed with the sendto/sendmsg call, it is used
@@ -3133,9 +3140,9 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsign
  */
 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
 {
+       struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_assoc_value params;
        struct sctp_association *asoc;
-       struct sctp_sock *sp = sctp_sk(sk);
        int val;
 
        if (optlen == sizeof(int)) {
@@ -3151,26 +3158,35 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
                if (copy_from_user(&params, optval, optlen))
                        return -EFAULT;
                val = params.assoc_value;
-       } else
+       } else {
                return -EINVAL;
+       }
 
-       if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
-               return -EINVAL;
+       if (val) {
+               int min_len, max_len;
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id && sctp_style(sk, UDP))
-               return -EINVAL;
+               min_len = SCTP_DEFAULT_MINSEGMENT - sp->pf->af->net_header_len;
+               min_len -= sizeof(struct sctphdr) +
+                          sizeof(struct sctp_data_chunk);
+
+               max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk);
 
+               if (val < min_len || val > max_len)
+                       return -EINVAL;
+       }
+
+       asoc = sctp_id2assoc(sk, params.assoc_id);
        if (asoc) {
                if (val == 0) {
-                       val = asoc->pathmtu;
-                       val -= sp->pf->af->net_header_len;
+                       val = asoc->pathmtu - sp->pf->af->net_header_len;
                        val -= sizeof(struct sctphdr) +
-                                       sizeof(struct sctp_data_chunk);
+                              sizeof(struct sctp_data_chunk);
                }
                asoc->user_frag = val;
                asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
        } else {
+               if (params.assoc_id && sctp_style(sk, UDP))
+                       return -EINVAL;
                sp->user_frag = val;
        }
 
@@ -5015,12 +5031,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
        if (!asoc)
                return -EINVAL;
 
-       /* If there is a thread waiting on more sndbuf space for
-        * sending on this asoc, it cannot be peeled.
-        */
-       if (waitqueue_active(&asoc->wait))
-               return -EBUSY;
-
        /* An association cannot be branched off from an already peeled-off
         * socket, nor is this supported for tcp style sockets.
         */
@@ -7989,7 +7999,7 @@ void sctp_sock_rfree(struct sk_buff *skb)
 
 /* Helper function to wait for space in the sndbuf.  */
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
-                               size_t msg_len)
+                               size_t msg_len, struct sock **orig_sk)
 {
        struct sock *sk = asoc->base.sk;
        int err = 0;
@@ -8006,10 +8016,11 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
        for (;;) {
                prepare_to_wait_exclusive(&asoc->wait, &wait,
                                          TASK_INTERRUPTIBLE);
+               if (asoc->base.dead)
+                       goto do_dead;
                if (!*timeo_p)
                        goto do_nonblock;
-               if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
-                   asoc->base.dead)
+               if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
                        goto do_error;
                if (signal_pending(current))
                        goto do_interrupted;
@@ -8022,11 +8033,17 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
                lock_sock(sk);
+               if (sk != asoc->base.sk) {
+                       release_sock(sk);
+                       sk = asoc->base.sk;
+                       lock_sock(sk);
+               }
 
                *timeo_p = current_timeo;
        }
 
 out:
+       *orig_sk = sk;
        finish_wait(&asoc->wait, &wait);
 
        /* Release the association's refcnt.  */
@@ -8034,6 +8051,10 @@ out:
 
        return err;
 
+do_dead:
+       err = -ESRCH;
+       goto out;
+
 do_error:
        err = -EPIPE;
        goto out;
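The sctp_setsockopt_maxseg hunk above replaces the old flat 8..SCTP_MAX_CHUNK_LEN check with bounds derived from the minimum PMTU and the per-packet overheads. The standalone sketch below only illustrates that arithmetic; the numeric sizes (IPv4 header, SCTP common header, DATA chunk header, SCTP_DEFAULT_MINSEGMENT, SCTP_MAX_CHUNK_LEN) are assumptions for demonstration, not values taken from the patch.

/* Illustrative sketch only, not part of the commit. */
#include <stdio.h>

int main(void)
{
	int net_header_len  = 20;             /* assumed IPv4 header length */
	int sctphdr_len     = 12;             /* assumed sizeof(struct sctphdr) */
	int data_chunk_len  = 16;             /* assumed sizeof(struct sctp_data_chunk) */
	int min_segment     = 512;            /* assumed SCTP_DEFAULT_MINSEGMENT */
	int max_chunk_len   = (1 << 16) - 4;  /* assumed SCTP_MAX_CHUNK_LEN */

	/* Mirrors the new check: a non-zero user maxseg must leave room
	 * for the DATA chunk inside the smallest supported PMTU and must
	 * not exceed the largest chunk payload.
	 */
	int min_len = min_segment - net_header_len - sctphdr_len - data_chunk_len;
	int max_len = max_chunk_len - data_chunk_len;

	printf("accepted maxseg range: %d..%d (0 keeps the PMTU-derived value)\n",
	       min_len, max_len);
	return 0;
}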
index b8c8cabb1a5844a6da377324de77ddf8a3000498..76ea66be0bbee7d3f018676d52c8b95ba06dbcb1 100644 (file)
@@ -64,7 +64,7 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
                 */
 
                /* Mark as failed send. */
-               sctp_chunk_fail(ch, SCTP_ERROR_INV_STRM);
+               sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
                if (asoc->peer.prsctp_capable &&
                    SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
                        asoc->sent_cnt_removable--;
@@ -254,6 +254,30 @@ static int sctp_send_reconf(struct sctp_association *asoc,
        return retval;
 }
 
+static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
+                                     __u16 str_nums, __be16 *str_list)
+{
+       struct sctp_association *asoc;
+       __u16 i;
+
+       asoc = container_of(stream, struct sctp_association, stream);
+       if (!asoc->outqueue.out_qlen)
+               return true;
+
+       if (!str_nums)
+               return false;
+
+       for (i = 0; i < str_nums; i++) {
+               __u16 sid = ntohs(str_list[i]);
+
+               if (stream->out[sid].ext &&
+                   !list_empty(&stream->out[sid].ext->outq))
+                       return false;
+       }
+
+       return true;
+}
+
 int sctp_send_reset_streams(struct sctp_association *asoc,
                            struct sctp_reset_streams *params)
 {
@@ -282,15 +306,31 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
 
        str_nums = params->srs_number_streams;
        str_list = params->srs_stream_list;
-       if (out && str_nums)
-               for (i = 0; i < str_nums; i++)
-                       if (str_list[i] >= stream->outcnt)
-                               goto out;
+       if (str_nums) {
+               int param_len = 0;
 
-       if (in && str_nums)
-               for (i = 0; i < str_nums; i++)
-                       if (str_list[i] >= stream->incnt)
-                               goto out;
+               if (out) {
+                       for (i = 0; i < str_nums; i++)
+                               if (str_list[i] >= stream->outcnt)
+                                       goto out;
+
+                       param_len = str_nums * sizeof(__u16) +
+                                   sizeof(struct sctp_strreset_outreq);
+               }
+
+               if (in) {
+                       for (i = 0; i < str_nums; i++)
+                               if (str_list[i] >= stream->incnt)
+                                       goto out;
+
+                       param_len += str_nums * sizeof(__u16) +
+                                    sizeof(struct sctp_strreset_inreq);
+               }
+
+               if (param_len > SCTP_MAX_CHUNK_LEN -
+                               sizeof(struct sctp_reconf_chunk))
+                       goto out;
+       }
 
        nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
        if (!nstr_list) {
@@ -301,6 +341,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
        for (i = 0; i < str_nums; i++)
                nstr_list[i] = htons(str_list[i]);
 
+       if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+               retval = -EAGAIN;
+               goto out;
+       }
+
        chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
 
        kfree(nstr_list);
@@ -361,6 +406,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
        if (asoc->strreset_outstanding)
                return -EINPROGRESS;
 
+       if (!sctp_outq_is_empty(&asoc->outqueue))
+               return -EAGAIN;
+
        chunk = sctp_make_strreset_tsnreq(asoc);
        if (!chunk)
                return -ENOMEM;
@@ -547,7 +595,7 @@ struct sctp_chunk *sctp_process_strreset_outreq(
                flags = SCTP_STREAM_RESET_INCOMING_SSN;
        }
 
-       nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2;
+       nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
        if (nums) {
                str_p = outreq->list_of_streams;
                for (i = 0; i < nums; i++) {
@@ -611,7 +659,7 @@ struct sctp_chunk *sctp_process_strreset_inreq(
                goto out;
        }
 
-       nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2;
+       nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
        str_p = inreq->list_of_streams;
        for (i = 0; i < nums; i++) {
                if (ntohs(str_p[i]) >= stream->outcnt) {
@@ -620,6 +668,12 @@ struct sctp_chunk *sctp_process_strreset_inreq(
                }
        }
 
+       if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
+               result = SCTP_STRRESET_IN_PROGRESS;
+               asoc->strreset_inseq--;
+               goto err;
+       }
+
        chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
        if (!chunk)
                goto out;
@@ -671,12 +725,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
                i = asoc->strreset_inseq - request_seq - 1;
                result = asoc->strreset_result[i];
                if (result == SCTP_STRRESET_PERFORMED) {
-                       next_tsn = asoc->next_tsn;
+                       next_tsn = asoc->ctsn_ack_point + 1;
                        init_tsn =
                                sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
                }
                goto err;
        }
+
+       if (!sctp_outq_is_empty(&asoc->outqueue)) {
+               result = SCTP_STRRESET_IN_PROGRESS;
+               goto err;
+       }
+
        asoc->strreset_inseq++;
 
        if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
@@ -687,9 +747,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
                goto out;
        }
 
-       /* G3: The same processing as though a SACK chunk with no gap report
-        *     and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
-        *     received MUST be performed.
+       /* G4: The same processing as though a FWD-TSN chunk (as defined in
+        *     [RFC3758]) with all streams affected and a new cumulative TSN
+        *     ACK of the Receiver's Next TSN minus 1 were received MUST be
+        *     performed.
         */
        max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
        sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
@@ -704,10 +765,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
        sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
                         init_tsn, GFP_ATOMIC);
 
-       /* G4: The same processing as though a FWD-TSN chunk (as defined in
-        *     [RFC3758]) with all streams affected and a new cumulative TSN
-        *     ACK of the Receiver's Next TSN minus 1 were received MUST be
-        *     performed.
+       /* G3: The same processing as though a SACK chunk with no gap report
+        *     and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+        *     received MUST be performed.
         */
        sctp_outq_free(&asoc->outqueue);
 
@@ -911,7 +971,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
                outreq = (struct sctp_strreset_outreq *)req;
                str_p = outreq->list_of_streams;
-               nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
+               nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
+                      sizeof(__u16);
 
                if (result == SCTP_STRRESET_PERFORMED) {
                        if (nums) {
@@ -940,7 +1001,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
                inreq = (struct sctp_strreset_inreq *)req;
                str_p = inreq->list_of_streams;
-               nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
+               nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
+                      sizeof(__u16);
 
                *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
                        nums, str_p, GFP_ATOMIC);
@@ -959,6 +1021,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
                if (result == SCTP_STRRESET_PERFORMED) {
                        __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
                                                &asoc->peer.tsn_map);
+                       LIST_HEAD(temp);
 
                        sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
                        sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
@@ -967,7 +1030,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
                                         SCTP_TSN_MAP_INITIAL,
                                         stsn, GFP_ATOMIC);
 
+                       /* Clean up sacked and abandoned queues only. As the
+                        * out_chunk_list may not be empty, splice it to temp,
+                        * then get it back after sctp_outq_free is done.
+                        */
+                       list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
                        sctp_outq_free(&asoc->outqueue);
+                       list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
 
                        asoc->next_tsn = rtsn;
                        asoc->ctsn_ack_point = asoc->next_tsn - 1;
index 0b83ec51e43b07524dd6eb6c4746a755d464a7ef..d8c162a4089cab87f8cf14288881567e09bf4721 100644 (file)
@@ -119,16 +119,27 @@ static struct sctp_sched_ops sctp_sched_fcfs = {
        .unsched_all = sctp_sched_fcfs_unsched_all,
 };
 
+static void sctp_sched_ops_fcfs_init(void)
+{
+       sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
+}
+
 /* API to other parts of the stack */
 
-extern struct sctp_sched_ops sctp_sched_prio;
-extern struct sctp_sched_ops sctp_sched_rr;
+static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
 
-static struct sctp_sched_ops *sctp_sched_ops[] = {
-       &sctp_sched_fcfs,
-       &sctp_sched_prio,
-       &sctp_sched_rr,
-};
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+                            struct sctp_sched_ops *sched_ops)
+{
+       sctp_sched_ops[sched] = sched_ops;
+}
+
+void sctp_sched_ops_init(void)
+{
+       sctp_sched_ops_fcfs_init();
+       sctp_sched_ops_prio_init();
+       sctp_sched_ops_rr_init();
+}
 
 int sctp_sched_set_sched(struct sctp_association *asoc,
                         enum sctp_sched_type sched)
index 384dbf3c876096e2ad98a6b6185d9da5cc4145c6..7997d35dd0fdfbd9a9a3180762dd3c7be556b1af 100644 (file)
@@ -333,7 +333,7 @@ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream)
                        sctp_sched_prio_unsched(soute);
 }
 
-struct sctp_sched_ops sctp_sched_prio = {
+static struct sctp_sched_ops sctp_sched_prio = {
        .set = sctp_sched_prio_set,
        .get = sctp_sched_prio_get,
        .init = sctp_sched_prio_init,
@@ -345,3 +345,8 @@ struct sctp_sched_ops sctp_sched_prio = {
        .sched_all = sctp_sched_prio_sched_all,
        .unsched_all = sctp_sched_prio_unsched_all,
 };
+
+void sctp_sched_ops_prio_init(void)
+{
+       sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio);
+}
index 7612a438c5b939ae1c26c4acc06902749b601524..1155692448f1aecf87095b379a753747ec303782 100644 (file)
@@ -187,7 +187,7 @@ static void sctp_sched_rr_unsched_all(struct sctp_stream *stream)
                sctp_sched_rr_unsched(stream, soute);
 }
 
-struct sctp_sched_ops sctp_sched_rr = {
+static struct sctp_sched_ops sctp_sched_rr = {
        .set = sctp_sched_rr_set,
        .get = sctp_sched_rr_get,
        .init = sctp_sched_rr_init,
@@ -199,3 +199,8 @@ struct sctp_sched_ops sctp_sched_rr = {
        .sched_all = sctp_sched_rr_sched_all,
        .unsched_all = sctp_sched_rr_unsched_all,
 };
+
+void sctp_sched_ops_rr_init(void)
+{
+       sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr);
+}
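The three stream scheduler files above stop exposing their ops structures as externs and instead hand them to sctp_sched_ops_register() from per-scheduler init helpers. A small self-contained userspace model of that pattern (an enum-indexed ops table filled by a register call) is sketched below; the names are invented for illustration and none of this is kernel code.

/* Illustrative sketch only, not part of the commit. */
#include <stdio.h>

enum sched_type { SS_FCFS, SS_PRIO, SS_RR, SS_MAX = SS_RR };

struct sched_ops { const char *name; };

static struct sched_ops *sched_ops_table[SS_MAX + 1];

static void sched_ops_register(enum sched_type type, struct sched_ops *ops)
{
	sched_ops_table[type] = ops;   /* same idea as sctp_sched_ops_register() */
}

static struct sched_ops fcfs_ops = { .name = "fcfs" };
static struct sched_ops prio_ops = { .name = "prio" };
static struct sched_ops rr_ops   = { .name = "rr" };

int main(void)
{
	/* Stand-in for sctp_sched_ops_init() calling each *_init() helper. */
	sched_ops_register(SS_FCFS, &fcfs_ops);
	sched_ops_register(SS_PRIO, &prio_ops);
	sched_ops_register(SS_RR, &rr_ops);

	for (int i = 0; i <= SS_MAX; i++)
		printf("%d -> %s\n", i, sched_ops_table[i]->name);
	return 0;
}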
index 2578fbd95664af84ab6b20aeaf4902d52a7ec265..94f21116dac5eff94f99e2254a9eaef7d8378608 100644 (file)
@@ -562,7 +562,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
 {
        struct smc_connection *conn = &smc->conn;
        struct smc_link_group *lgr = conn->lgr;
-       struct smc_buf_desc *buf_desc = NULL;
+       struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
        struct list_head *buf_list;
        int bufsize, bufsize_short;
        int sk_buf_size;
@@ -575,7 +575,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
                /* use socket send buffer size (w/o overhead) as start value */
                sk_buf_size = smc->sk.sk_sndbuf / 2;
 
-       for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
+       for (bufsize_short = smc_compress_bufsize(sk_buf_size);
             bufsize_short >= 0; bufsize_short--) {
 
                if (is_rmb) {
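The __smc_buf_create change above starts buf_desc at ERR_PTR(-ENOMEM) instead of NULL so callers see a real errno when no buffer size can be allocated. A rough userspace model of that ERR_PTR convention (encoding a small negative errno in a pointer) follows; it is heavily simplified and not kernel code.

/* Illustrative sketch only, not part of the commit. */
#include <stdio.h>
#include <errno.h>

static void *err_ptr(long err)      { return (void *)err; }
static long  ptr_err(const void *p) { return (long)p; }
static int   is_err(const void *p)  { return (unsigned long)p >= (unsigned long)-4095; }

int main(void)
{
	void *buf = err_ptr(-ENOMEM);   /* pessimistic default, like the fix */

	/* ... a loop over candidate buffer sizes would overwrite buf on success ... */

	if (is_err(buf))
		printf("no buffer allocated, error %ld\n", ptr_err(buf));
	return 0;
}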
index 7b1ee5a0b03cd10d167a6ca522243c4285996151..5dd4e6c9fef21f650db78907e0fa46ee09413c71 100644 (file)
@@ -264,7 +264,7 @@ out:
        return status;
 }
 
-static struct cache_detail rsi_cache_template = {
+static const struct cache_detail rsi_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSI_HASHMAX,
        .name           = "auth.rpcsec.init",
@@ -524,7 +524,7 @@ out:
        return status;
 }
 
-static struct cache_detail rsc_cache_template = {
+static const struct cache_detail rsc_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = RSC_HASHMAX,
        .name           = "auth.rpcsec.context",
@@ -855,11 +855,13 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
                return stat;
        if (integ_len > buf->len)
                return stat;
-       if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
-               BUG();
+       if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) {
+               WARN_ON_ONCE(1);
+               return stat;
+       }
        /* copy out mic... */
        if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
-               BUG();
+               return stat;
        if (mic.len > RPC_MAX_AUTH_SIZE)
                return stat;
        mic.data = kmalloc(mic.len, GFP_KERNEL);
@@ -1611,8 +1613,10 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
        BUG_ON(integ_len % 4);
        *p++ = htonl(integ_len);
        *p++ = htonl(gc->gc_seq);
-       if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len))
-               BUG();
+       if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) {
+               WARN_ON_ONCE(1);
+               goto out_err;
+       }
        if (resbuf->tail[0].iov_base == NULL) {
                if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
                        goto out_err;
index 79d55d949d9a794a1501aee45f4807e76c7bfa1d..e68943895be48e36f6225280b5f7510abb541dec 100644 (file)
@@ -1674,7 +1674,7 @@ void cache_unregister_net(struct cache_detail *cd, struct net *net)
 }
 EXPORT_SYMBOL_GPL(cache_unregister_net);
 
-struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
+struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
 {
        struct cache_detail *cd;
        int i;
index 71de77bd44236dee6bd7ea1e86b8e317aee65060..f9307bd6644b704ad4e038dcd850ea13c062146f 100644 (file)
@@ -250,9 +250,9 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
        svc_xprt_received(new);
 }
 
-int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
-                   struct net *net, const int family,
-                   const unsigned short port, int flags)
+static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+                           struct net *net, const int family,
+                           const unsigned short port, int flags)
 {
        struct svc_xprt_class *xcl;
 
@@ -380,7 +380,6 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
        struct svc_pool *pool;
        struct svc_rqst *rqstp = NULL;
        int cpu;
-       bool queued = false;
 
        if (!svc_xprt_has_something_to_do(xprt))
                goto out;
@@ -401,58 +400,25 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 
        atomic_long_inc(&pool->sp_stats.packets);
 
-redo_search:
+       dprintk("svc: transport %p put into queue\n", xprt);
+       spin_lock_bh(&pool->sp_lock);
+       list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+       pool->sp_stats.sockets_queued++;
+       spin_unlock_bh(&pool->sp_lock);
+
        /* find a thread for this xprt */
        rcu_read_lock();
        list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
-               /* Do a lockless check first */
-               if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+               if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
                        continue;
-
-               /*
-                * Once the xprt has been queued, it can only be dequeued by
-                * the task that intends to service it. All we can do at that
-                * point is to try to wake this thread back up so that it can
-                * do so.
-                */
-               if (!queued) {
-                       spin_lock_bh(&rqstp->rq_lock);
-                       if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
-                               /* already busy, move on... */
-                               spin_unlock_bh(&rqstp->rq_lock);
-                               continue;
-                       }
-
-                       /* this one will do */
-                       rqstp->rq_xprt = xprt;
-                       svc_xprt_get(xprt);
-                       spin_unlock_bh(&rqstp->rq_lock);
-               }
-               rcu_read_unlock();
-
                atomic_long_inc(&pool->sp_stats.threads_woken);
                wake_up_process(rqstp->rq_task);
-               put_cpu();
-               goto out;
-       }
-       rcu_read_unlock();
-
-       /*
-        * We didn't find an idle thread to use, so we need to queue the xprt.
-        * Do so and then search again. If we find one, we can't hook this one
-        * up to it directly but we can wake the thread up in the hopes that it
-        * will pick it up once it searches for a xprt to service.
-        */
-       if (!queued) {
-               queued = true;
-               dprintk("svc: transport %p put into queue\n", xprt);
-               spin_lock_bh(&pool->sp_lock);
-               list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
-               pool->sp_stats.sockets_queued++;
-               spin_unlock_bh(&pool->sp_lock);
-               goto redo_search;
+               goto out_unlock;
        }
+       set_bit(SP_CONGESTED, &pool->sp_flags);
        rqstp = NULL;
+out_unlock:
+       rcu_read_unlock();
        put_cpu();
 out:
        trace_svc_xprt_do_enqueue(xprt, rqstp);
@@ -721,38 +687,25 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 
 static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 {
-       struct svc_xprt *xprt;
        struct svc_pool         *pool = rqstp->rq_pool;
        long                    time_left = 0;
 
        /* rq_xprt should be clear on entry */
        WARN_ON_ONCE(rqstp->rq_xprt);
 
-       /* Normally we will wait up to 5 seconds for any required
-        * cache information to be provided.
-        */
-       rqstp->rq_chandle.thread_wait = 5*HZ;
-
-       xprt = svc_xprt_dequeue(pool);
-       if (xprt) {
-               rqstp->rq_xprt = xprt;
-
-               /* As there is a shortage of threads and this request
-                * had to be queued, don't allow the thread to wait so
-                * long for cache updates.
-                */
-               rqstp->rq_chandle.thread_wait = 1*HZ;
-               clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-               return xprt;
-       }
+       rqstp->rq_xprt = svc_xprt_dequeue(pool);
+       if (rqstp->rq_xprt)
+               goto out_found;
 
        /*
         * We have to be able to interrupt this wait
         * to bring down the daemons ...
         */
        set_current_state(TASK_INTERRUPTIBLE);
+       smp_mb__before_atomic();
+       clear_bit(SP_CONGESTED, &pool->sp_flags);
        clear_bit(RQ_BUSY, &rqstp->rq_flags);
-       smp_mb();
+       smp_mb__after_atomic();
 
        if (likely(rqst_should_sleep(rqstp)))
                time_left = schedule_timeout(timeout);
@@ -761,13 +714,11 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 
        try_to_freeze();
 
-       spin_lock_bh(&rqstp->rq_lock);
        set_bit(RQ_BUSY, &rqstp->rq_flags);
-       spin_unlock_bh(&rqstp->rq_lock);
-
-       xprt = rqstp->rq_xprt;
-       if (xprt != NULL)
-               return xprt;
+       smp_mb__after_atomic();
+       rqstp->rq_xprt = svc_xprt_dequeue(pool);
+       if (rqstp->rq_xprt)
+               goto out_found;
 
        if (!time_left)
                atomic_long_inc(&pool->sp_stats.threads_timedout);
@@ -775,6 +726,15 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
        if (signalled() || kthread_should_stop())
                return ERR_PTR(-EINTR);
        return ERR_PTR(-EAGAIN);
+out_found:
+       /* Normally we will wait up to 5 seconds for any required
+        * cache information to be provided.
+        */
+       if (!test_bit(SP_CONGESTED, &pool->sp_flags))
+               rqstp->rq_chandle.thread_wait = 5*HZ;
+       else
+               rqstp->rq_chandle.thread_wait = 1*HZ;
+       return rqstp->rq_xprt;
 }
 
 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -785,7 +745,7 @@ static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt
        serv->sv_tmpcnt++;
        if (serv->sv_temptimer.function == NULL) {
                /* setup timer to age temp transports */
-               serv->sv_temptimer.function = (TIMER_FUNC_TYPE)svc_age_temp_xprts;
+               serv->sv_temptimer.function = svc_age_temp_xprts;
                mod_timer(&serv->sv_temptimer,
                          jiffies + svc_conn_age_period * HZ);
        }
index f81eaa8e08888a1a16041548521a5908bf8a9a50..740b67d5a733bdcd1ad10b6efdf957a8cd9a7889 100644 (file)
@@ -569,7 +569,7 @@ static int unix_gid_show(struct seq_file *m,
        return 0;
 }
 
-static struct cache_detail unix_gid_cache_template = {
+static const struct cache_detail unix_gid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = GID_HASHMAX,
        .name           = "auth.unix.gid",
@@ -862,7 +862,7 @@ struct auth_ops svcauth_unix = {
        .set_client     = svcauth_unix_set_client,
 };
 
-static struct cache_detail ip_map_cache_template = {
+static const struct cache_detail ip_map_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = IP_HASHMAX,
        .name           = "auth.unix.ip",
index 992594b7cc6b699d75614ca45bbf4631c5e1ede2..af7893501e40acdbaf678a373f721264cf398029 100644 (file)
@@ -133,6 +133,10 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
        if (ret)
                goto out_err;
 
+       /* Bump page refcnt so Send completion doesn't release
+        * the rq_buffer before all retransmits are complete.
+        */
+       get_page(virt_to_page(rqst->rq_buffer));
        ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
        if (ret)
                goto out_unmap;
@@ -165,7 +169,6 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
                return -EINVAL;
        }
 
-       /* svc_rdma_sendto releases this page */
        page = alloc_page(RPCRDMA_DEF_GFP);
        if (!page)
                return -ENOMEM;
@@ -184,6 +187,7 @@ xprt_rdma_bc_free(struct rpc_task *task)
 {
        struct rpc_rqst *rqst = task->tk_rqstp;
 
+       put_page(virt_to_page(rqst->rq_buffer));
        kfree(rqst->rq_rbuffer);
 }
 
index 5caf8e722a118659f8b9e8c3531f60a8e738158b..46ec069150d50ff53e93a7f17b0d716fa80503a2 100644 (file)
@@ -290,6 +290,7 @@ static void qp_event_handler(struct ib_event *event, void *context)
                        ib_event_msg(event->event), event->event,
                        event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
+               svc_xprt_enqueue(xprt);
                break;
        }
 }
@@ -322,8 +323,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                goto out;
-       svc_xprt_enqueue(&xprt->sc_xprt);
-       goto out;
+       goto out_enqueue;
 
 flushed:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
@@ -333,6 +333,8 @@ flushed:
        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
        svc_rdma_put_context(ctxt, 1);
 
+out_enqueue:
+       svc_xprt_enqueue(&xprt->sc_xprt);
 out:
        svc_xprt_put(&xprt->sc_xprt);
 }
@@ -358,6 +360,7 @@ void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+               svc_xprt_enqueue(&xprt->sc_xprt);
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        pr_err("svcrdma: Send: %s (%u/0x%x)\n",
                               ib_wc_status_msg(wc->status),
@@ -569,8 +572,10 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
-               if (xprt)
+               if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+                       svc_xprt_enqueue(&xprt->sc_xprt);
+               }
                break;
 
        default:
index 7821085a7dd87cb81b1b5e3f5c4d33273c65e3d9..95fec2c057d6ebdb223e19ef83bf9c383cb2156e 100644 (file)
@@ -497,6 +497,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
        while ((skb = skb_peek(defq))) {
                hdr = buf_msg(skb);
                mtyp = msg_type(hdr);
+               blks = msg_blocks(hdr);
                deliver = true;
                ack = false;
                update = false;
@@ -539,14 +540,13 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
                        tipc_group_proto_xmit(grp, m, GRP_ACK_MSG, xmitq);
 
                if (leave) {
-                       tipc_group_delete_member(grp, m);
                        __skb_queue_purge(defq);
+                       tipc_group_delete_member(grp, m);
                        break;
                }
                if (!update)
                        continue;
 
-               blks = msg_blocks(hdr);
                tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
        }
        return;
index 1649d456e22d13ed9bb9d719c27265007997d18c..b0d07b35909d3349fd7fae2526c96bddc93b2b4e 100644 (file)
@@ -174,7 +174,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 
        if (fragid == LAST_FRAGMENT) {
                TIPC_SKB_CB(head)->validated = false;
-               if (unlikely(!tipc_msg_validate(head)))
+               if (unlikely(!tipc_msg_validate(&head)))
                        goto err;
                *buf = head;
                TIPC_SKB_CB(head)->tail = NULL;
@@ -201,11 +201,21 @@ err:
  * TIPC will ignore the excess, under the assumption that it is optional info
  * introduced by a later release of the protocol.
  */
-bool tipc_msg_validate(struct sk_buff *skb)
+bool tipc_msg_validate(struct sk_buff **_skb)
 {
-       struct tipc_msg *msg;
+       struct sk_buff *skb = *_skb;
+       struct tipc_msg *hdr;
        int msz, hsz;
 
+       /* Ensure that flow control ratio condition is satisfied */
+       if (unlikely(skb->truesize / buf_roundup_len(skb) > 4)) {
+               skb = skb_copy(skb, GFP_ATOMIC);
+               if (!skb)
+                       return false;
+               kfree_skb(*_skb);
+               *_skb = skb;
+       }
+
        if (unlikely(TIPC_SKB_CB(skb)->validated))
                return true;
        if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
@@ -217,11 +227,11 @@ bool tipc_msg_validate(struct sk_buff *skb)
        if (unlikely(!pskb_may_pull(skb, hsz)))
                return false;
 
-       msg = buf_msg(skb);
-       if (unlikely(msg_version(msg) != TIPC_VERSION))
+       hdr = buf_msg(skb);
+       if (unlikely(msg_version(hdr) != TIPC_VERSION))
                return false;
 
-       msz = msg_size(msg);
+       msz = msg_size(hdr);
        if (unlikely(msz < hsz))
                return false;
        if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
@@ -411,7 +421,7 @@ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
        skb_pull(*iskb, offset);
        imsz = msg_size(buf_msg(*iskb));
        skb_trim(*iskb, imsz);
-       if (unlikely(!tipc_msg_validate(*iskb)))
+       if (unlikely(!tipc_msg_validate(iskb)))
                goto none;
        *pos += align(imsz);
        return true;
index bf8f57ccc70cc163a6626af03dcadae0a5303764..3e4384c222f705fdf20a56c53c4abf363bcd7186 100644 (file)
@@ -926,7 +926,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
 }
 
 struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
-bool tipc_msg_validate(struct sk_buff *skb);
+bool tipc_msg_validate(struct sk_buff **_skb);
 bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
 void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
                     struct sk_buff_head *xmitq);
@@ -954,6 +954,11 @@ static inline u16 buf_seqno(struct sk_buff *skb)
        return msg_seqno(buf_msg(skb));
 }
 
+static inline int buf_roundup_len(struct sk_buff *skb)
+{
+       return (skb->len / 1024 + 1) * 1024;
+}
+
 /* tipc_skb_peek(): peek and reserve first buffer in list
  * @list: list to be peeked in
  * Returns pointer to first buffer in list, if any
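tipc_msg_validate() now takes a double pointer so it can swap in a copied skb when the buffer's true size exceeds four times its rounded data length, using the buf_roundup_len() helper shown just above. A tiny userspace model of that ratio test, with made-up numbers, follows.

/* Illustrative sketch only, not part of the commit. */
#include <stdbool.h>
#include <stdio.h>

static int buf_roundup_len(int len)
{
	return (len / 1024 + 1) * 1024;          /* same rounding as the helper above */
}

static bool needs_copy(int len, int truesize)
{
	/* Copy when the buffer is more than 4x larger than the rounded
	 * payload, so oversized buffers do not skew flow control.
	 */
	return truesize / buf_roundup_len(len) > 4;
}

int main(void)
{
	printf("%d\n", needs_copy(200, 8192));   /* 8 > 4  -> 1, would copy  */
	printf("%d\n", needs_copy(200, 4096));   /* 4 <= 4 -> 0, keep as is  */
	return 0;
}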
index 009a81631280027c39272e67dc5fe1140257e3d6..507017fe0f1b5263f819411388240ef64cab935d 100644 (file)
@@ -1539,7 +1539,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
        __skb_queue_head_init(&xmitq);
 
        /* Ensure message is well-formed before touching the header */
-       if (unlikely(!tipc_msg_validate(skb)))
+       if (unlikely(!tipc_msg_validate(&skb)))
                goto discard;
        hdr = buf_msg(skb);
        usr = msg_user(hdr);
index 391775e3575c24a81ae70f75a451645c0b734b73..a7a73ffe675b2a9e829a375e90fc3d055386dcf8 100644 (file)
@@ -797,11 +797,13 @@ static void vmci_transport_handle_detach(struct sock *sk)
 
                /* We should not be sending anymore since the peer won't be
                 * there to receive, but we can still receive if there is data
-                * left in our consume queue.
+                * left in our consume queue. If the local endpoint is a host,
+                * we can't call vsock_stream_has_data, since that may block,
+                * but a host endpoint can't read data once the VM has
+                * detached, so there is no available data in that case.
                 */
-               if (vsock_stream_has_data(vsk) <= 0) {
-                       sk->sk_state = TCP_CLOSE;
-
+               if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
+                   vsock_stream_has_data(vsk) <= 0) {
                        if (sk->sk_state == TCP_SYN_SENT) {
                                /* The peer may detach from a queue pair while
                                 * we are still in the connecting state, i.e.,
@@ -811,10 +813,12 @@ static void vmci_transport_handle_detach(struct sock *sk)
                                 * event like a reset.
                                 */
 
+                               sk->sk_state = TCP_CLOSE;
                                sk->sk_err = ECONNRESET;
                                sk->sk_error_report(sk);
                                return;
                        }
+                       sk->sk_state = TCP_CLOSE;
                }
                sk->sk_state_change(sk);
        }
@@ -2144,7 +2148,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.4.0-k");
+MODULE_VERSION("1.0.5.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
index da91bb547db3e7ec2ddcf7526d56821bfe46b6f5..1abcc4fc4df18e81df5cfb072e81ce6970f5e6bc 100644 (file)
@@ -20,6 +20,10 @@ config CFG80211
        tristate "cfg80211 - wireless configuration API"
        depends on RFKILL || !RFKILL
        select FW_LOADER
+       # may need to update this when certificates are changed and are
+       # using a different algorithm, though right now they shouldn't
+       # (this is here rather than below to allow it to be a module)
+       select CRYPTO_SHA256 if CFG80211_USE_KERNEL_REGDB_KEYS
        ---help---
          cfg80211 is the Linux wireless LAN (802.11) configuration API.
          Enable this if you have a wireless device.
@@ -113,6 +117,9 @@ config CFG80211_EXTRA_REGDB_KEYDIR
          certificates like in the kernel sources (net/wireless/certs/)
          that shall be accepted for a signed regulatory database.
 
+         Note that you need to also select the correct CRYPTO_<hash> modules
+         for your certificates, and if cfg80211 is built-in they also must be.
+
 config CFG80211_REG_CELLULAR_HINTS
        bool "cfg80211 regulatory support for cellular base station hints"
        depends on CFG80211_CERTIFICATION_ONUS
index 459611577d3dfa29f72442dfe1dcdfe4f2c6a502..801d4781a73b6724ce06f95c3be709bb9cd53174 100644 (file)
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(lib80211_crypto_lock);
 static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
                                          int force);
 static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
-static void lib80211_crypt_deinit_handler(unsigned long data);
+static void lib80211_crypt_deinit_handler(struct timer_list *t);
 
 int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
                                spinlock_t *lock)
@@ -55,8 +55,8 @@ int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
        info->lock = lock;
 
        INIT_LIST_HEAD(&info->crypt_deinit_list);
-       setup_timer(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
-                       (unsigned long)info);
+       timer_setup(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
+                   0);
 
        return 0;
 }
@@ -116,9 +116,10 @@ static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
        spin_unlock_irqrestore(info->lock, flags);
 }
 
-static void lib80211_crypt_deinit_handler(unsigned long data)
+static void lib80211_crypt_deinit_handler(struct timer_list *t)
 {
-       struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
+       struct lib80211_crypt_info *info = from_timer(info, t,
+                                                     crypt_deinit_timer);
        unsigned long flags;
 
        lib80211_crypt_deinit_entries(info, 0);
index bb16f1ec766ead1e65fb6e4196a4278ff09a67a3..b1ac23ca20c86be0af71e9a1ba92cc99d8d5a967 100644 (file)
@@ -2605,10 +2605,32 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
                        goto nla_put_failure;
        }
 
-       if (wdev->ssid_len) {
-               if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
+       wdev_lock(wdev);
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_AP:
+               if (wdev->ssid_len &&
+                   nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
                        goto nla_put_failure;
+               break;
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_ADHOC: {
+               const u8 *ssid_ie;
+               if (!wdev->current_bss)
+                       break;
+               ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
+                                              WLAN_EID_SSID);
+               if (!ssid_ie)
+                       break;
+               if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
+                       goto nla_put_failure;
+               break;
+               }
+       default:
+               /* nothing */
+               break;
        }
+       wdev_unlock(wdev);
 
        genlmsg_end(msg, hdr);
        return 0;
@@ -6291,7 +6313,7 @@ static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb,
        if (!hdr)
                return -1;
 
-       genl_dump_check_consistent(cb, hdr, &nl80211_fam);
+       genl_dump_check_consistent(cb, hdr);
 
        if (nl80211_put_regdom(regdom, msg))
                goto nla_put_failure;
@@ -7722,7 +7744,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
        if (!hdr)
                return -1;
 
-       genl_dump_check_consistent(cb, hdr, &nl80211_fam);
+       genl_dump_check_consistent(cb, hdr);
 
        if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation))
                goto nla_put_failure;
index 3871998059de7beb9a2bff9a5321a8eb81bfcff4..78e71b0390be90bc16655d380fa6869391c52729 100644 (file)
@@ -3644,27 +3644,14 @@ void regulatory_propagate_dfs_state(struct wiphy *wiphy,
        }
 }
 
-int __init regulatory_init(void)
+static int __init regulatory_init_db(void)
 {
-       int err = 0;
+       int err;
 
        err = load_builtin_regdb_keys();
        if (err)
                return err;
 
-       reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
-       if (IS_ERR(reg_pdev))
-               return PTR_ERR(reg_pdev);
-
-       spin_lock_init(&reg_requests_lock);
-       spin_lock_init(&reg_pending_beacons_lock);
-       spin_lock_init(&reg_indoor_lock);
-
-       rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
-
-       user_alpha2[0] = '9';
-       user_alpha2[1] = '7';
-
        /* We always try to get an update for the static regdomain */
        err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
        if (err) {
@@ -3692,6 +3679,31 @@ int __init regulatory_init(void)
 
        return 0;
 }
+#ifndef MODULE
+late_initcall(regulatory_init_db);
+#endif
+
+int __init regulatory_init(void)
+{
+       reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
+       if (IS_ERR(reg_pdev))
+               return PTR_ERR(reg_pdev);
+
+       spin_lock_init(&reg_requests_lock);
+       spin_lock_init(&reg_pending_beacons_lock);
+       spin_lock_init(&reg_indoor_lock);
+
+       rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
+
+       user_alpha2[0] = '9';
+       user_alpha2[1] = '7';
+
+#ifdef MODULE
+       return regulatory_init_db();
+#else
+       return 0;
+#endif
+}
 
 void regulatory_exit(void)
 {
index ea87143314f3048f8f08ae6974f7f9938e306350..562cc11131f6c8ba37cb5e4e09b1587dca3c8a2c 100644 (file)
@@ -415,7 +415,7 @@ static void __x25_destroy_socket(struct sock *sk)
        if (sk_has_allocations(sk)) {
                /* Defer: outstanding buffers */
                sk->sk_timer.expires  = jiffies + 10 * HZ;
-               sk->sk_timer.function = (TIMER_FUNC_TYPE)x25_destroy_timer;
+               sk->sk_timer.function = x25_destroy_timer;
                add_timer(&sk->sk_timer);
        } else {
                /* drop last reference so sock_put will free */
index e0cd04d283527cde1c32d430b35cec56ebb02860..a6a8ab09b914660fcd600255cf6ff3b2dc46bc32 100644 (file)
@@ -36,7 +36,7 @@
 LIST_HEAD(x25_neigh_list);
 DEFINE_RWLOCK(x25_neigh_list_lock);
 
-static void x25_t20timer_expiry(unsigned long);
+static void x25_t20timer_expiry(struct timer_list *);
 
 static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
 static void x25_transmit_restart_request(struct x25_neigh *nb);
@@ -49,9 +49,9 @@ static inline void x25_start_t20timer(struct x25_neigh *nb)
        mod_timer(&nb->t20timer, jiffies + nb->t20);
 }
 
-static void x25_t20timer_expiry(unsigned long param)
+static void x25_t20timer_expiry(struct timer_list *t)
 {
-       struct x25_neigh *nb = (struct x25_neigh *)param;
+       struct x25_neigh *nb = from_timer(nb, t, t20timer);
 
        x25_transmit_restart_request(nb);
 
@@ -252,7 +252,7 @@ void x25_link_device_up(struct net_device *dev)
                return;
 
        skb_queue_head_init(&nb->queue);
-       setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);
+       timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);
 
        dev_hold(dev);
        nb->dev      = dev;
index 1dfba3c23459e9f00904592857e74e730fd49216..fa3461002b3ea3bcdbabc4d195aef237b56a13f5 100644 (file)
@@ -36,7 +36,7 @@ void x25_init_timers(struct sock *sk)
        timer_setup(&x25->timer, x25_timer_expiry, 0);
 
        /* initialized by sock_init_data */
-       sk->sk_timer.function = (TIMER_FUNC_TYPE)x25_heartbeat_expiry;
+       sk->sk_timer.function = x25_heartbeat_expiry;
 }
 
 void x25_start_heartbeat(struct sock *sk)
index 2f57722f5d0391762894b7d228f1b94a4e22d74e..9542975eb2f90dcb2bae894edeb9b418d04f252e 100644 (file)
@@ -1305,6 +1305,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
                newp->xfrm_nr = old->xfrm_nr;
                newp->index = old->index;
                newp->type = old->type;
+               newp->family = old->family;
                memcpy(newp->xfrm_vec, old->xfrm_vec,
                       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
                spin_lock_bh(&net->xfrm.xfrm_policy_lock);
@@ -1360,29 +1361,36 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
        struct net *net = xp_net(policy);
        int nx;
        int i, error;
+       xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
+       xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
        xfrm_address_t tmp;
 
        for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
                struct xfrm_state *x;
-               xfrm_address_t *local;
-               xfrm_address_t *remote;
+               xfrm_address_t *remote = daddr;
+               xfrm_address_t *local  = saddr;
                struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
 
-               remote = &tmpl->id.daddr;
-               local = &tmpl->saddr;
-               if (xfrm_addr_any(local, tmpl->encap_family)) {
-                       error = xfrm_get_saddr(net, fl->flowi_oif,
-                                              &tmp, remote,
-                                              tmpl->encap_family, 0);
-                       if (error)
-                               goto fail;
-                       local = &tmp;
+               if (tmpl->mode == XFRM_MODE_TUNNEL ||
+                   tmpl->mode == XFRM_MODE_BEET) {
+                       remote = &tmpl->id.daddr;
+                       local = &tmpl->saddr;
+                       if (xfrm_addr_any(local, tmpl->encap_family)) {
+                               error = xfrm_get_saddr(net, fl->flowi_oif,
+                                                      &tmp, remote,
+                                                      tmpl->encap_family, 0);
+                               if (error)
+                                       goto fail;
+                               local = &tmp;
+                       }
                }
 
                x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
 
                if (x && x->km.state == XFRM_STATE_VALID) {
                        xfrm[nx++] = x;
+                       daddr = remote;
+                       saddr = local;
                        continue;
                }
                if (x) {
index 1f5cee2269af4296bd41745adec063a4a04faa9f..065d89606888ec1bf053577d3949746bcea6f099 100644 (file)
@@ -556,7 +556,7 @@ out:
        return HRTIMER_NORESTART;
 }
 
-static void xfrm_replay_timer_handler(unsigned long data);
+static void xfrm_replay_timer_handler(struct timer_list *t);
 
 struct xfrm_state *xfrm_state_alloc(struct net *net)
 {
@@ -574,8 +574,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
                INIT_HLIST_NODE(&x->byspi);
                tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
                                        CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
-               setup_timer(&x->rtimer, xfrm_replay_timer_handler,
-                               (unsigned long)x);
+               timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
                x->curlft.add_time = get_seconds();
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
@@ -1879,9 +1878,9 @@ void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
 }
 EXPORT_SYMBOL(xfrm_state_walk_done);
 
-static void xfrm_replay_timer_handler(unsigned long data)
+static void xfrm_replay_timer_handler(struct timer_list *t)
 {
-       struct xfrm_state *x = (struct xfrm_state *)data;
+       struct xfrm_state *x = from_timer(x, t, rtimer);
 
        spin_lock(&x->lock);
 
index 3b4945c1eab06aec48c326f6b1d822cb158461ff..adeaa1302f346a2c2af0843584d2e1803e8f66a6 100644 (file)
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-y := test_lru_dist
 hostprogs-y += sock_example
index f5c3012ffa795b676b7e4ff0bb63626844f1ad2e..dec1b22adf54afff9651f92eddb65a9aaa1e4d76 100644 (file)
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-y := hid-example
 
index 19a870eed82b398307404431947e86d727db1674..0e349b80686e76a421759b931ec04d194e446768 100644 (file)
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct
 
 HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include
index 9291ab8e0f8c5a089425ae65f9ee20e718e0a9ff..73f1da4d116cf9a78a01cb3293d11f8a27a589cc 100644 (file)
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-y := sockmap
 
index 1f80a3d8cf45ca913e97ba5094443af07a12b6ae..59df7c25a9d1589caa4ad444768b33055c9f99f8 100644 (file)
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-$(CONFIG_SAMPLE_STATX) := test-statx
 
index c95a696560a7de59f3a7839ef85ec57d90c3aa29..8d7fd6190ac4e9f07cd2de96f9e144a3b73fdc5b 100644 (file)
@@ -1,6 +1,3 @@
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
 # List of programs to build
 hostprogs-y := uhid-example
 
index 9ffd3dda3889c56a7a72229bed21ff5c49d62856..065324a8046ff8e0af179e6e90f335afb8d99179 100644 (file)
@@ -8,6 +8,8 @@ squote  := '
 empty   :=
 space   := $(empty) $(empty)
 space_escape := _-_SPACE_-_
+right_paren := )
+left_paren := (
 
 ###
 # Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -80,6 +82,71 @@ cc-cross-prefix =  \
                        echo $(c);                                    \
                fi)))
 
+# Tools for caching Makefile variables that are "expensive" to compute.
+#
+# Here we want to help deal with variables that take a long time to compute
+# by making it easy to store these variables in a cache.
+#
+# The canonical example here is testing for compiler flags.  On a simple system
+# each call to the compiler takes 10 ms, but on a system with a compiler that's
+# called through various wrappers it can take upwards of 100 ms.  If we have
+# 100 calls to the compiler this can take 1 second (on a simple system) or 10
+# seconds (on a complicated system).
+#
+# The "cache" will be in Makefile syntax and can be directly included.
+# Any time we try to reference a variable that's not in the cache we'll
+# calculate it and store it in the cache for next time.
+
+# Include values from last time
+make-cache := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/,$(if $(obj),$(obj)/)).cache.mk
+$(make-cache): ;
+-include $(make-cache)
+
+cached-data := $(filter __cached_%, $(.VARIABLES))
+
+# If cache exceeds 1000 lines, shrink it down to 500.
+ifneq ($(word 1000,$(cached-data)),)
+$(shell tail -n 500 $(make-cache) > $(make-cache).tmp; \
+       mv $(make-cache).tmp $(make-cache))
+endif
+
+create-cache-dir := $(if $(KBUILD_SRC),$(if $(cache-data),,1))
+
+# Usage: $(call __sanitize-opt,Hello=Hola$(comma)Goodbye Adios)
+#
+# Convert all '$', ')', '(', '\', '=', ' ', ',', ':' to '_'
+__sanitize-opt = $(subst $$,_,$(subst $(right_paren),_,$(subst $(left_paren),_,$(subst \,_,$(subst =,_,$(subst $(space),_,$(subst $(comma),_,$(subst :,_,$(1)))))))))
+
+# Usage:   $(call shell-cached,shell_command)
+# Example: $(call shell-cached,md5sum /usr/bin/gcc)
+#
+# If we've already seen a call to this exact shell command (even in a
+# previous invocation of make!) we'll return the value.  If not, we'll
+# compute it and store the result for future runs.
+#
+# This is a bit of voodoo, but basic explanation is that if the variable
+# was undefined then we'll evaluate the shell command and store the result
+# into the variable.  We'll then store that value in the cache and finally
+# output the value.
+#
+# NOTE: The $$(2) here isn't actually a parameter to __run-and-store.  We
+# happen to know that the caller will have their shell command in $(2) so the
+# result of "call"ing this will produce a reference to that $(2).  The reason
+# for this strangeness is to avoid an extra level of eval (and escaping) of
+# $(2).
+define __run-and-store
+ifeq ($(origin $(1)),undefined)
+  $$(eval $(1) := $$(shell $$(2)))
+ifeq ($(create-cache-dir),1)
+  $$(shell mkdir -p $(dir $(make-cache)))
+  $$(eval create-cache-dir :=)
+endif
+  $$(shell echo '$(1) := $$($(1))' >> $(make-cache))
+endif
+endef
+__shell-cached = $(eval $(call __run-and-store,$(1)))$($(1))
+shell-cached = $(call __shell-cached,__cached_$(call __sanitize-opt,$(1)),$(1))
+
 # output directory for tests below
 TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
 
@@ -87,30 +154,36 @@ TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
 # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
 # Exit code chooses option. "$$TMP" serves as a temporary file and is
 # automatically cleaned up.
-try-run = $(shell set -e;              \
+__try-run = set -e;                    \
        TMP="$(TMPOUT).$$$$.tmp";       \
        TMPO="$(TMPOUT).$$$$.o";        \
        if ($(1)) >/dev/null 2>&1;      \
        then echo "$(2)";               \
        else echo "$(3)";               \
        fi;                             \
-       rm -f "$$TMP" "$$TMPO")
+       rm -f "$$TMP" "$$TMPO"
+
+try-run = $(shell $(__try-run))
+
+# try-run-cached
+# This works like try-run, but the result is cached.
+try-run-cached = $(call shell-cached,$(__try-run))
 
 # as-option
 # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
 
-as-option = $(call try-run,\
+as-option = $(call try-run-cached,\
        $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
 
 # as-instr
 # Usage: cflags-y += $(call as-instr,instr,option1,option2)
 
-as-instr = $(call try-run,\
+as-instr = $(call try-run-cached,\
        printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
 # __cc-option
 # Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
-__cc-option = $(call try-run,\
+__cc-option = $(call try-run-cached,\
        $(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
 
 # Do not attempt to build with gcc plugins during cc-option tests.
@@ -130,23 +203,23 @@ hostcc-option = $(call __cc-option, $(HOSTCC),\
 
 # cc-option-yn
 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
-cc-option-yn = $(call try-run,\
+cc-option-yn = $(call try-run-cached,\
        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
 
 # cc-disable-warning
 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
-cc-disable-warning = $(call try-run,\
+cc-disable-warning = $(call try-run-cached,\
        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-name
 # Expands to either gcc or clang
-cc-name = $(shell $(CC) -v 2>&1 | grep -q "clang version" && echo clang || echo gcc)
+cc-name = $(call shell-cached,$(CC) -v 2>&1 | grep -q "clang version" && echo clang || echo gcc)
 
 # cc-version
-cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
+cc-version = $(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
 
 # cc-fullversion
-cc-fullversion = $(shell $(CONFIG_SHELL) \
+cc-fullversion = $(call shell-cached,$(CONFIG_SHELL) \
        $(srctree)/scripts/gcc-version.sh -p $(CC))
 
 # cc-ifversion
@@ -159,22 +232,23 @@ cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo
 
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
-cc-ldoption = $(call try-run,\
-       $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+cc-ldoption = $(call try-run-cached,\
+       $(CC) $(1) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
 
 # ld-option
 # Usage: LDFLAGS += $(call ld-option, -X)
-ld-option = $(call try-run,\
-       $(CC) -x c /dev/null -c -o "$$TMPO" ; $(LD) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
+ld-option = $(call try-run-cached,\
+       $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \
+       $(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
 
 # ar-option
 # Usage: KBUILD_ARFLAGS := $(call ar-option,D)
 # Important: no spaces around options
-ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
+ar-option = $(call try-run-cached, $(AR) rc$(1) "$$TMP",$(1),$(2))
 
 # ld-version
 # Note this is mainly for HJ Lu's 3 number binutil versions
-ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
+ld-version = $(call shell-cached,$(LD) --version | $(srctree)/scripts/ld-version.sh)
 
 # ld-ifversion
 # Usage:  $(call ld-ifversion, -ge, 22252, y)
index 524eeedc8d25d2d5a2233329b8b73acb39fd72ac..32ad8e93fbe15c44216ea76c63fcbf87f687445e 100644 (file)
@@ -6,6 +6,9 @@
 # and for each file listed in this file with generic-y creates
 # a small wrapper file in $(obj) (arch/$(SRCARCH)/include/generated/$(src))
 
+PHONY := all
+all:
+
 kbuild-file := $(srctree)/arch/$(SRCARCH)/include/$(src)/Kbuild
 -include $(kbuild-file)
 
index e63af4e19382af618b365d5b9ebe50321d0b0407..cb8997ed01497ccebdfded3aef8ccdd401fa8482 100644 (file)
@@ -65,15 +65,6 @@ ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(h
 include scripts/Makefile.host
 endif
 
-ifneq ($(KBUILD_SRC),)
-# Create output directory if not already present
-_dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
-
-# Create directories for object files if directory does not exist
-# Needed when obj-y := dir/file.o syntax is used
-_dummy := $(foreach d,$(obj-dirs), $(shell [ -d $(d) ] || mkdir -p $(d)))
-endif
-
 ifndef obj
 $(warning kbuild: Makefile.build is included improperly)
 endif
@@ -85,7 +76,7 @@ lib-target := $(obj)/lib.a
 obj-y += $(obj)/lib-ksyms.o
 endif
 
-ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
+ifneq ($(strip $(obj-y) $(need-builtin)),)
 builtin-target := $(obj)/built-in.o
 endif
 
@@ -109,6 +100,10 @@ ifneq ($(KBUILD_CHECKSRC),0)
   endif
 endif
 
+ifneq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),)
+  cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $< ;
+endif
+
 # Do section mismatch analysis for each module/built-in.o
 ifdef CONFIG_DEBUG_SECTION_MISMATCH
   cmd_secanalysis = ; scripts/mod/modpost $@
@@ -292,6 +287,7 @@ define rule_cc_o_c
        $(call echo-cmd,checksrc) $(cmd_checksrc)                         \
        $(call cmd_and_fixdep,cc_o_c)                                     \
        $(cmd_modversions_c)                                              \
+       $(cmd_checkdoc)                                                   \
        $(call echo-cmd,objtool) $(cmd_objtool)                           \
        $(call echo-cmd,record_mcount) $(cmd_record_mcount)
 endef
@@ -563,14 +559,14 @@ $(multi-used-m): FORCE
 $(call multi_depend, $(multi-used-m), .o, -objs -y -m)
 
 targets += $(multi-used-y) $(multi-used-m)
-
+targets := $(filter-out $(PHONY), $(targets))
 
 # Descending
 # ---------------------------------------------------------------------------
 
 PHONY += $(subdir-ym)
 $(subdir-ym):
-       $(Q)$(MAKE) $(build)=$@
+       $(Q)$(MAKE) $(build)=$@ need-builtin=$(if $(findstring $@,$(subdir-obj-y)),1)
 
 # Add FORCE to the prerequisites of a target to force it to be always rebuilt.
 # ---------------------------------------------------------------------------
@@ -584,13 +580,23 @@ FORCE:
 # optimization, we don't need to read them if the target does not
 # exist, we will rebuild anyway in that case.
 
-targets := $(wildcard $(sort $(targets)))
-cmd_files := $(wildcard $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
+cmd_files := $(wildcard $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd))
 
 ifneq ($(cmd_files),)
   include $(cmd_files)
 endif
 
+ifneq ($(KBUILD_SRC),)
+# Create directories for object files if they do not exist
+obj-dirs := $(sort $(obj) $(patsubst %/,%, $(dir $(targets))))
+# If cmd_files exist, their directories apparently exist.  Skip mkdir.
+exist-dirs := $(sort $(patsubst %/,%, $(dir $(cmd_files))))
+obj-dirs := $(strip $(filter-out $(exist-dirs), $(obj-dirs)))
+ifneq ($(obj-dirs),)
+$(shell mkdir -p $(obj-dirs))
+endif
+endif
+
 # Declare the contents of the .PHONY variable as phony.  We keep that
 # information in a variable so we can use it in if_changed and friends.
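
The mkdir hunk above replaces the unconditional per-Makefile directory creation removed earlier; a standalone shell sketch of the idea (paths hypothetical, and the real rule additionally skips directories already implied by existing .cmd files):

    targets="drivers/foo/built-in.o drivers/foo/bar/baz.o"
    for t in $targets; do dirname "$t"; done | sort -u | xargs mkdir -p
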
 
index 99967948d764259228b50b7e8af8110eda5126a2..d5e1314711312c6572001170a506b022ac43f1fc 100644 (file)
@@ -27,11 +27,11 @@ subdirs       := $(patsubst $(srcdir)/%/,%,\
 # Recursion
 __headers: $(subdirs)
 
-.PHONY: $(subdirs)
+PHONY += $(subdirs)
 $(subdirs):
        $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(dst)/$@
 
-# Skip header install/check for include/uapi and arch/$(hdr-arch)/include/uapi.
+# Skip header install/check for include/uapi and arch/$(SRCARCH)/include/uapi.
 # We have only sub-directories there.
 skip-inst := $(if $(filter %/uapi,$(obj)),1)
 
@@ -115,9 +115,8 @@ $(check-file): scripts/headers_check.pl $(output-files) FORCE
 
 endif
 
-targets := $(wildcard $(sort $(targets)))
 cmd_files := $(wildcard \
-             $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
+             $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd))
 
 ifneq ($(cmd_files),)
        include $(cmd_files)
@@ -125,6 +124,7 @@ endif
 
 endif # skip-inst
 
-.PHONY: $(PHONY)
 PHONY += FORCE
 FORCE: ;
+
+.PHONY: $(PHONY)
diff --git a/scripts/Makefile.help b/scripts/Makefile.help
deleted file mode 100644 (file)
index d03608f..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-
-checker-help:
-       @echo  '  coccicheck      - Check with Coccinelle.'
index 10e5c3cb89dc09997e49dd341ab81e6086b29c46..e6dc6ae2d7c4d1addb6193f30c0d6c920b1d86d5 100644 (file)
@@ -49,15 +49,6 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
 host-cshobjs   := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
 host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
 
-# output directory for programs/.o files
-# hostprogs-y := tools/build may have been specified.
-# Retrieve also directory of .o files from prog-objs or prog-cxxobjs notation
-host-objdirs := $(dir $(__hostprogs) $(host-cobjs) $(host-cxxobjs))
-
-host-objdirs := $(strip $(sort $(filter-out ./,$(host-objdirs))))
-
-
-__hostprogs     := $(addprefix $(obj)/,$(__hostprogs))
 host-csingle   := $(addprefix $(obj)/,$(host-csingle))
 host-cmulti    := $(addprefix $(obj)/,$(host-cmulti))
 host-cobjs     := $(addprefix $(obj)/,$(host-cobjs))
@@ -67,9 +58,6 @@ host-cshlib   := $(addprefix $(obj)/,$(host-cshlib))
 host-cxxshlib  := $(addprefix $(obj)/,$(host-cxxshlib))
 host-cshobjs   := $(addprefix $(obj)/,$(host-cshobjs))
 host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
-host-objdirs    := $(addprefix $(obj)/,$(host-objdirs))
-
-obj-dirs += $(host-objdirs)
 
 #####
 # Handle options to gcc. Support building with separate output directory
diff --git a/scripts/Makefile.kcov b/scripts/Makefile.kcov
new file mode 100644 (file)
index 0000000..5cc7203
--- /dev/null
@@ -0,0 +1,7 @@
+ifdef CONFIG_KCOV
+CFLAGS_KCOV    := $(call cc-option,-fsanitize-coverage=trace-pc,)
+ifeq ($(CONFIG_KCOV_ENABLE_COMPARISONS),y)
+CFLAGS_KCOV += $(call cc-option,-fsanitize-coverage=trace-cmp,)
+endif
+
+endif
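
cc-option compiles an empty translation unit with the candidate flag and falls back to the alternative on failure; a rough shell equivalent of the first probe above (editor's sketch, $CC assumed to be set):

    ${CC:-gcc} -Werror -fsanitize-coverage=trace-pc -c -x c /dev/null -o /dev/null 2>/dev/null \
            && echo "-fsanitize-coverage=trace-pc"
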
index 2278405cbc80e2ba666b8a1bdaaf785375d75103..1ca4dcd2d5005146a5c58b2385ac5f4c5ea0b8c1 100644 (file)
@@ -5,24 +5,25 @@ ccflags-y  += $(EXTRA_CFLAGS)
 cppflags-y += $(EXTRA_CPPFLAGS)
 ldflags-y  += $(EXTRA_LDFLAGS)
 
-#
-# flags that take effect in sub directories
-export KBUILD_SUBDIR_ASFLAGS := $(KBUILD_SUBDIR_ASFLAGS) $(subdir-asflags-y)
-export KBUILD_SUBDIR_CCFLAGS := $(KBUILD_SUBDIR_CCFLAGS) $(subdir-ccflags-y)
+# flags that take effect in current and sub directories
+KBUILD_AFLAGS += $(subdir-asflags-y)
+KBUILD_CFLAGS += $(subdir-ccflags-y)
 
 # Figure out what we need to build from the various variables
 # ===========================================================================
 
 # When an object is listed to be built compiled-in and modular,
 # only build the compiled-in version
-
 obj-m := $(filter-out $(obj-y),$(obj-m))
 
 # Libraries are always collected in one lib file.
 # Filter out objects already built-in
-
 lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
 
+# Determine modorder.
+# Unfortunately, we don't have information about ordering between -y
+# and -m subdirs.  Just put -y's first.
+modorder       := $(patsubst %/,%/modules.order, $(filter %/, $(obj-y)) $(obj-m:.o=.ko))
 
 # Handle objects in subdirs
 # ---------------------------------------------------------------------------
@@ -30,12 +31,6 @@ lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
 #   and add the directory to the list of dirs to descend into: $(subdir-y)
 # o if we encounter foo/ in $(obj-m), remove it from $(obj-m)
 #   and add the directory to the list of dirs to descend into: $(subdir-m)
-
-# Determine modorder.
-# Unfortunately, we don't have information about ordering between -y
-# and -m subdirs.  Just put -y's first.
-modorder       := $(patsubst %/,%/modules.order, $(filter %/, $(obj-y)) $(obj-m:.o=.ko))
-
 __subdir-y     := $(patsubst %/,%,$(filter %/, $(obj-y)))
 subdir-y       += $(__subdir-y)
 __subdir-m     := $(patsubst %/,%,$(filter %/, $(obj-m)))
@@ -44,10 +39,9 @@ obj-y                := $(patsubst %/, %/built-in.o, $(obj-y))
 obj-m          := $(filter-out %/, $(obj-m))
 
 # Subdirectories we need to descend into
-
 subdir-ym      := $(sort $(subdir-y) $(subdir-m))
 
-# if $(foo-objs) exists, foo.o is a composite object
+# if $(foo-objs), $(foo-y), or $(foo-m) exists, foo.o is a composite object
 multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m))))
 multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))), $(m))))
 multi-used   := $(multi-used-y) $(multi-used-m)
@@ -57,17 +51,13 @@ single-used-m := $(sort $(filter-out $(multi-used-m),$(obj-m)))
 # objects depend on those (obviously)
 multi-objs-y := $(foreach m, $(multi-used-y), $($(m:.o=-objs)) $($(m:.o=-y)))
 multi-objs-m := $(foreach m, $(multi-used-m), $($(m:.o=-objs)) $($(m:.o=-y)))
-multi-objs   := $(multi-objs-y) $(multi-objs-m)
 
 # $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to
 # tell kbuild to descend
 subdir-obj-y := $(filter %/built-in.o, $(obj-y))
 
-# $(obj-dirs) is a list of directories that contain object files
-obj-dirs := $(dir $(multi-objs) $(obj-y))
-
 # Replace multi-part objects by their individual parts, look at local dir only
-real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
+real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m)))
 real-objs-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)),$(m)))
 
 # DTB
@@ -93,11 +83,9 @@ multi-used-m := $(addprefix $(obj)/,$(multi-used-m))
 multi-objs-y   := $(addprefix $(obj)/,$(multi-objs-y))
 multi-objs-m   := $(addprefix $(obj)/,$(multi-objs-m))
 subdir-ym      := $(addprefix $(obj)/,$(subdir-ym))
-obj-dirs       := $(addprefix $(obj)/,$(obj-dirs))
 
 # These flags are needed for modversions and compiling, so we define them here
-# already
-# $(modname_flags) #defines KBUILD_MODNAME as the name of the module it will
+# $(modname_flags) defines KBUILD_MODNAME as the name of the module it will
 # end up in (or would, if it gets compiled in)
 # Note: Files that end up in two or more modules are compiled without the
 #       KBUILD_MODNAME definition. The reason is that any made-up name would
@@ -107,10 +95,10 @@ basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget))
 modname_flags  = $(if $(filter 1,$(words $(modname))),\
                  -DKBUILD_MODNAME=$(call name-fix,$(modname)))
 
-orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
+orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) \
                  $(ccflags-y) $(CFLAGS_$(basetarget).o)
 _c_flags       = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
-orig_a_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(KBUILD_SUBDIR_ASFLAGS) \
+orig_a_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) \
                  $(asflags-y) $(AFLAGS_$(basetarget).o)
 _a_flags       = $(filter-out $(AFLAGS_REMOVE_$(basetarget).o), $(orig_a_flags))
 _cpp_flags     = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(@F))
index 991db7d6e4df8bff58ac0e3b6b7799c0a195bba4..df4174405feb331a772abe871046d9260c43c690 100644 (file)
@@ -143,8 +143,7 @@ FORCE:
 # optimization, we don't need to read them if the target does not
 # exist, we will rebuild anyway in that case.
 
-targets := $(wildcard $(sort $(targets)))
-cmd_files := $(wildcard $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
+cmd_files := $(wildcard $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd))
 
 ifneq ($(cmd_files),)
   include $(cmd_files)
index 6f099f915dcfe15a46541af65d39327e6494f21f..94b664817ad91e2e48c8fef6361a20ab2a632763 100755 (executable)
@@ -83,8 +83,11 @@ def print_result(symboltype, symbolformat, argc):
     for d, n in delta:
         if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
 
-    print("Total: Before=%d, After=%d, chg %+.2f%%" % \
-        (otot, ntot, (ntot - otot)*100.0/otot))
+    if otot:
+        percent = (ntot - otot) * 100.0 / otot
+    else:
+        percent = 0
+    print("Total: Before=%d, After=%d, chg %+.2f%%" % (otot, ntot, percent))
 
 if sys.argv[1] == "-c":
     print_result("Function", "tT", 3)
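
With the guard in place, a comparison whose "before" total is zero reports +0.00% instead of raising ZeroDivisionError. Typical invocation (file names illustrative):

    ./scripts/bloat-o-meter vmlinux.old vmlinux.new
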
index 8b80bac055e490219f97d913f33bce165960fe92..040aa79e1d9d39c55df7a748565dea3296ec6a6f 100755 (executable)
@@ -454,6 +454,7 @@ our $zero_initializer = qr{(?:(?:0[xX])?0+$Int_type?|NULL|false)\b};
 our $logFunctions = qr{(?x:
        printk(?:_ratelimited|_once|_deferred_once|_deferred|)|
        (?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)|
+       TP_printk|
        WARN(?:_RATELIMIT|_ONCE|)|
        panic|
        MODULE_[A-Z_]+|
@@ -2900,8 +2901,9 @@ sub process {
                                 $line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) {
                                $msg_type = "";
 
-                       # EFI_GUID is another special case
-                       } elsif ($line =~ /^\+.*\bEFI_GUID\s*\(/) {
+                       # More special cases
+                       } elsif ($line =~ /^\+.*\bEFI_GUID\s*\(/ ||
+                                $line =~ /^\+\s*(?:\w+)?\s*DEFINE_PER_CPU/) {
                                $msg_type = "";
 
                        # Otherwise set the alternate message types
@@ -3103,6 +3105,7 @@ sub process {
                      $line =~ /^\+[a-z_]*init/ ||
                      $line =~ /^\+\s*(?:static\s+)?[A-Z_]*ATTR/ ||
                      $line =~ /^\+\s*DECLARE/ ||
+                     $line =~ /^\+\s*builtin_[\w_]*driver/ ||
                      $line =~ /^\+\s*__setup/)) {
                        if (CHK("LINE_SPACING",
                                "Please use a blank line after function/struct/union/enum declarations\n" . $hereprev) &&
@@ -3182,6 +3185,12 @@ sub process {
 # check we are in a valid C source file if not then ignore this hunk
                next if ($realfile !~ /\.(h|c)$/);
 
+# check for unusual line ending [ or (
+               if ($line =~ /^\+.*([\[\(])\s*$/) {
+                       CHK("OPEN_ENDED_LINE",
+                           "Lines should not end with a '$1'\n" . $herecurr);
+               }
+
 # check if this appears to be the start function declaration, save the name
                if ($sline =~ /^\+\{\s*$/ &&
                    $prevline =~ /^\+(?:(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*)?($Ident)\(/) {
@@ -3829,28 +3838,10 @@ sub process {
                             "Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
                }
 
-# printk should use KERN_* levels.  Note that follow on printk's on the
-# same line do not need a level, so we use the current block context
-# to try and find and validate the current printk.  In summary the current
-# printk includes all preceding printk's which have no newline on the end.
-# we assume the first bad printk is the one to report.
-               if ($line =~ /\bprintk\((?!KERN_)\s*"/) {
-                       my $ok = 0;
-                       for (my $ln = $linenr - 1; $ln >= $first_line; $ln--) {
-                               #print "CHECK<$lines[$ln - 1]\n";
-                               # we have a preceding printk if it ends
-                               # with "\n" ignore it, else it is to blame
-                               if ($lines[$ln - 1] =~ m{\bprintk\(}) {
-                                       if ($rawlines[$ln - 1] !~ m{\\n"}) {
-                                               $ok = 1;
-                                       }
-                                       last;
-                               }
-                       }
-                       if ($ok == 0) {
-                               WARN("PRINTK_WITHOUT_KERN_LEVEL",
-                                    "printk() should include KERN_ facility level\n" . $herecurr);
-                       }
+# printk should use KERN_* levels
+               if ($line =~ /\bprintk\s*\(\s*(?!KERN_[A-Z]+\b)/) {
+                       WARN("PRINTK_WITHOUT_KERN_LEVEL",
+                            "printk() should include KERN_<LEVEL> facility level\n" . $herecurr);
                }
 
                if ($line =~ /\bprintk\s*\(\s*KERN_([A-Z]+)/) {
@@ -5762,7 +5753,7 @@ sub process {
                        for (my $count = $linenr; $count <= $lc; $count++) {
                                my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
                                $fmt =~ s/%%//g;
-                               if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNO]).)/) {
+                               if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNOx]).)/) {
                                        $bad_extension = $1;
                                        last;
                                }
@@ -5957,7 +5948,7 @@ sub process {
 
 # check for function declarations that have arguments without identifier names
                if (defined $stat &&
-                   $stat =~ /^.\s*(?:extern\s+)?$Type\s*$Ident\s*\(\s*([^{]+)\s*\)\s*;/s &&
+                   $stat =~ /^.\s*(?:extern\s+)?$Type\s*(?:$Ident|\(\s*\*\s*$Ident\s*\))\s*\(\s*([^{]+)\s*\)\s*;/s &&
                    $1 ne "void") {
                        my $args = trim($1);
                        while ($args =~ m/\s*($Type\s*(?:$Ident|\(\s*\*\s*$Ident?\s*\)\s*$balanced_parens)?)/g) {
@@ -6109,7 +6100,7 @@ sub process {
                                next if ($fline =~ /^.[\s$;]*$/);
                                $has_statement = 1;
                                $count++;
-                               $has_break = 1 if ($fline =~ /\bswitch\b|\b(?:break\s*;[\s$;]*$|return\b|goto\b|continue\b)/);
+                               $has_break = 1 if ($fline =~ /\bswitch\b|\b(?:break\s*;[\s$;]*$|exit\s*\(\b|return\b|goto\b|continue\b)/);
                        }
                        if (!$has_break && $has_statement) {
                                WARN("MISSING_BREAK",
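
The simplified check now warns on any printk( that is not immediately followed by a KERN_ constant, without scanning earlier continuation lines. To run only this class of warning against a source file (path illustrative; --types and -f are standard checkpatch options):

    ./scripts/checkpatch.pl -f --types PRINTK_WITHOUT_KERN_LEVEL drivers/foo/bar.c
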
index 28ad1feff9e12d07fbf1ff2369ca1ecf7e76dd85..ecfac64b39fe0c403d90e8700f92ca3aff400ead 100755 (executable)
@@ -30,12 +30,6 @@ else
        VERBOSE=0
 fi
 
-if [ -z "$J" ]; then
-       NPROC=$(getconf _NPROCESSORS_ONLN)
-else
-       NPROC="$J"
-fi
-
 FLAGS="--very-quiet"
 
 # You can use SPFLAGS to append extra arguments to coccicheck or override any
@@ -70,6 +64,9 @@ if [ "$C" = "1" -o "$C" = "2" ]; then
     # Take only the last argument, which is the C file to test
     shift $(( $# - 1 ))
     OPTIONS="$COCCIINCLUDE $1"
+
+    # No need to parallelize Coccinelle since this mode takes one input file.
+    NPROC=1
 else
     ONLINE=0
     if [ "$KBUILD_EXTMOD" = "" ] ; then
@@ -77,6 +74,12 @@ else
     else
         OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
     fi
+
+    if [ -z "$J" ]; then
+        NPROC=$(getconf _NPROCESSORS_ONLN)
+    else
+        NPROC="$J"
+    fi
 fi
 
 if [ "$KBUILD_EXTMOD" != "" ] ; then
@@ -123,15 +126,8 @@ run_cmd_parmap() {
        if [ $VERBOSE -ne 0 ] ; then
                echo "Running ($NPROC in parallel): $@"
        fi
-       if [ "$DEBUG_FILE" != "/dev/null" -a "$DEBUG_FILE" != "" ]; then
-               if [ -f $DEBUG_FILE ]; then
-                       echo "Debug file $DEBUG_FILE exists, bailing"
-                       exit
-               fi
-       else
-               DEBUG_FILE="/dev/null"
-       fi
-       $@ 2>$DEBUG_FILE
+       echo $@ >>$DEBUG_FILE
+       $@ 2>>$DEBUG_FILE
        if [[ $? -ne 0 ]]; then
                echo "coccicheck failed"
                exit $?
@@ -176,8 +172,8 @@ OPTIONS="$OPTIONS $SPFLAGS"
 coccinelle () {
     COCCI="$1"
 
-    OPT=`grep "Option" $COCCI | cut -d':' -f2`
-    REQ=`grep "Requires" $COCCI | cut -d':' -f2 | sed "s| ||"`
+    OPT=`grep "Options:" $COCCI | cut -d':' -f2`
+    REQ=`grep "Requires:" $COCCI | cut -d':' -f2 | sed "s| ||"`
     REQ_NUM=$(echo $REQ | ${DIR}/scripts/ld-version.sh)
     if [ "$REQ_NUM" != "0" ] ; then
            if [ "$SPATCH_VERSION_NUM" -lt "$REQ_NUM" ] ; then
@@ -194,7 +190,7 @@ coccinelle () {
 
     if [ $VERBOSE -ne 0 -a $ONLINE -eq 0 ] ; then
 
-       FILE=`echo $COCCI | sed "s|$srctree/||"`
+       FILE=${COCCI#$srctree/}
 
        echo "Processing `basename $COCCI`"
        echo "with option(s) \"$OPT\""
@@ -247,6 +243,15 @@ coccinelle () {
 
 }
 
+if [ "$DEBUG_FILE" != "/dev/null" -a "$DEBUG_FILE" != "" ]; then
+       if [ -f $DEBUG_FILE ]; then
+               echo "Debug file $DEBUG_FILE exists, bailing"
+               exit
+       fi
+else
+       DEBUG_FILE="/dev/null"
+fi
+
 if [ "$COCCI" = "" ] ; then
     for f in `find $srctree/scripts/coccinelle/ -name '*.cocci' -type f | sort`; do
        coccinelle $f
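
Taken together, the hunks above select the parallelism as follows (consolidated sketch of the new behaviour):

    if [ "$C" = "1" -o "$C" = "2" ]; then
            NPROC=1                                 # single C file: nothing to parallelize
    elif [ -z "$J" ]; then
            NPROC=$(getconf _NPROCESSORS_ONLN)      # default: all online CPUs
    else
            NPROC="$J"                              # explicit override
    fi
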
diff --git a/scripts/coccinelle/api/check_bq27xxx_data.cocci b/scripts/coccinelle/api/check_bq27xxx_data.cocci
new file mode 100644 (file)
index 0000000..9212b85
--- /dev/null
@@ -0,0 +1,161 @@
+/// Detect BQ27XXX_DATA structures with identical registers, dm registers or
+/// properties.
+//# Doesn't unfold macros used in register or property fields.
+//# Requires OCaml scripting
+///
+// Confidence: High
+// Copyright: (C) 2017 Julia Lawall, Inria/LIP6, GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Requires: 1.0.7
+// Keywords: BQ27XXX_DATA
+
+virtual report
+
+@initialize:ocaml@
+@@
+
+let print_report p msg =
+  let p = List.hd p in
+  Printf.printf "%s:%d:%d-%d: %s" p.file p.line p.col p.col_end msg
+
+@str depends on report@
+type t;
+identifier i,i1,i2;
+expression e1,e2;
+@@
+
+t i[] = {
+  ...,
+  [e1] = BQ27XXX_DATA(i1,...),
+  ...,
+  [e2] = BQ27XXX_DATA(i2,...),
+  ...,
+};
+
+@script:ocaml tocheck@
+i1 << str.i1;
+i2 << str.i2;
+i1regs; i2regs;
+i1dmregs; i2dmregs;
+i1props; i2props;
+@@
+
+if not(i1 = i2)
+then
+  begin
+    i1regs := make_ident (i1 ^ "_regs");
+    i2regs := make_ident (i2 ^ "_regs");
+    i1dmregs := make_ident (i1 ^ "_dm_regs");
+    i2dmregs := make_ident (i2 ^ "_dm_regs");
+    i1props := make_ident (i1 ^ "_props");
+    i2props := make_ident (i2 ^ "_props")
+  end
+
+(* ---------------------------------------------------------------- *)
+
+@getregs1@
+typedef u8;
+identifier tocheck.i1regs;
+initializer list i1regs_vals;
+position p1;
+@@
+
+u8 i1regs@p1[...] = { i1regs_vals, };
+
+@getregs2@
+identifier tocheck.i2regs;
+initializer list i2regs_vals;
+position p2;
+@@
+
+u8 i2regs@p2[...] = { i2regs_vals, };
+
+@script:ocaml@
+(_,i1regs_vals) << getregs1.i1regs_vals;
+(_,i2regs_vals) << getregs2.i2regs_vals;
+i1regs << tocheck.i1regs;
+i2regs << tocheck.i2regs;
+p1 << getregs1.p1;
+p2 << getregs2.p2;
+@@
+
+if i1regs < i2regs &&
+   List.sort compare i1regs_vals = List.sort compare i2regs_vals
+then
+  let msg =
+    Printf.sprintf
+      "WARNING %s and %s (line %d) are identical\n"
+      i1regs i2regs (List.hd p2).line in
+  print_report p1 msg
+
+(* ---------------------------------------------------------------- *)
+
+@getdmregs1@
+identifier tocheck.i1dmregs;
+initializer list i1dmregs_vals;
+position p1;
+@@
+
+struct bq27xxx_dm_reg i1dmregs@p1[] = { i1dmregs_vals, };
+
+@getdmregs2@
+identifier tocheck.i2dmregs;
+initializer list i2dmregs_vals;
+position p2;
+@@
+
+struct bq27xxx_dm_reg i2dmregs@p2[] = { i2dmregs_vals, };
+
+@script:ocaml@
+(_,i1dmregs_vals) << getdmregs1.i1dmregs_vals;
+(_,i2dmregs_vals) << getdmregs2.i2dmregs_vals;
+i1dmregs << tocheck.i1dmregs;
+i2dmregs << tocheck.i2dmregs;
+p1 << getdmregs1.p1;
+p2 << getdmregs2.p2;
+@@
+
+if i1dmregs < i2dmregs &&
+   List.sort compare i1dmregs_vals = List.sort compare i2dmregs_vals
+then
+  let msg =
+    Printf.sprintf
+      "WARNING %s and %s (line %d) are identical\n"
+      i1dmregs i2dmregs (List.hd p2).line in
+  print_report p1 msg
+
+(* ---------------------------------------------------------------- *)
+
+@getprops1@
+identifier tocheck.i1props;
+initializer list[n1] i1props_vals;
+position p1;
+@@
+
+enum power_supply_property i1props@p1[] = { i1props_vals, };
+
+@getprops2@
+identifier tocheck.i2props;
+initializer list[n2] i2props_vals;
+position p2;
+@@
+
+enum power_supply_property i2props@p2[] = { i2props_vals, };
+
+@script:ocaml@
+(_,i1props_vals) << getprops1.i1props_vals;
+(_,i2props_vals) << getprops2.i2props_vals;
+i1props << tocheck.i1props;
+i2props << tocheck.i2props;
+p1 << getprops1.p1;
+p2 << getprops2.p2;
+@@
+
+if i1props < i2props &&
+   List.sort compare i1props_vals = List.sort compare i2props_vals
+then
+  let msg =
+    Printf.sprintf
+      "WARNING %s and %s (line %d) are identical\n"
+      i1props i2props (List.hd p2).line in
+  print_report p1 msg
diff --git a/scripts/coccinelle/api/setup_timer.cocci b/scripts/coccinelle/api/setup_timer.cocci
deleted file mode 100644 (file)
index eb6bd9e..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-/// Use setup_timer function instead of initializing timer with the function
-/// and data fields
-// Confidence: High
-// Copyright: (C) 2016 Vaishali Thakkar, Oracle. GPLv2
-// Options: --no-includes --include-headers
-// Keywords: init_timer, setup_timer
-
-virtual patch
-virtual context
-virtual org
-virtual report
-
-@match_immediate_function_data_after_init_timer
-depends on patch && !context && !org && !report@
-expression e, func, da;
-@@
-
--init_timer (&e);
-+setup_timer (&e, func, da);
-
-(
--e.function = func;
--e.data = da;
-|
--e.data = da;
--e.function = func;
-)
-
-@match_function_and_data_after_init_timer
-depends on patch && !context && !org && !report@
-expression e1, e2, e3, e4, e5, a, b;
-@@
-
--init_timer (&e1);
-+setup_timer (&e1, a, b);
-
-... when != a = e2
-    when != b = e3
-(
--e1.function = a;
-... when != b = e4
--e1.data = b;
-|
--e1.data = b;
-... when != a = e5
--e1.function = a;
-)
-
-@r1 exists@
-identifier f;
-position p;
-@@
-
-f(...) { ... when any
-  init_timer@p(...)
-  ... when any
-}
-
-@r2 exists@
-identifier g != r1.f;
-struct timer_list t;
-expression e8;
-@@
-
-g(...) { ... when any
-  t.data = e8
-  ... when any
-}
-
-// It is dangerous to use setup_timer if data field is initialized
-// in another function.
-
-@script:python depends on r2@
-p << r1.p;
-@@
-
-cocci.include_match(False)
-
-@r3 depends on patch && !context && !org && !report@
-expression e6, e7, c;
-position r1.p;
-@@
-
--init_timer@p (&e6);
-+setup_timer (&e6, c, 0UL);
-... when != c = e7
--e6.function = c;
-
-// ----------------------------------------------------------------------------
-
-@match_immediate_function_data_after_init_timer_context
-depends on !patch && (context || org || report)@
-expression da, e, func;
-position j0, j1, j2;
-@@
-
-* init_timer@j0 (&e);
-(
-* e@j1.function = func;
-* e@j2.data = da;
-|
-* e@j1.data = da;
-* e@j2.function = func;
-)
-
-@match_function_and_data_after_init_timer_context
-depends on !patch &&
-!match_immediate_function_data_after_init_timer_context &&
- (context || org || report)@
-expression a, b, e1, e2, e3, e4, e5;
-position j0, j1, j2;
-@@
-
-* init_timer@j0 (&e1);
-... when != a = e2
-    when != b = e3
-(
-* e1@j1.function = a;
-... when != b = e4
-* e1@j2.data = b;
-|
-* e1@j1.data = b;
-... when != a = e5
-* e1@j2.function = a;
-)
-
-@r3_context depends on !patch &&
-!match_immediate_function_data_after_init_timer_context &&
-!match_function_and_data_after_init_timer_context &&
- (context || org || report)@
-expression c, e6, e7;
-position r1.p;
-position j0, j1;
-@@
-
-* init_timer@j0@p (&e6);
-... when != c = e7
-* e6@j1.function = c;
-
-// ----------------------------------------------------------------------------
-
-@script:python match_immediate_function_data_after_init_timer_org
-depends on org@
-j0 << match_immediate_function_data_after_init_timer_context.j0;
-j1 << match_immediate_function_data_after_init_timer_context.j1;
-j2 << match_immediate_function_data_after_init_timer_context.j2;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-coccilib.org.print_link(j2[0], "")
-
-@script:python match_function_and_data_after_init_timer_org depends on org@
-j0 << match_function_and_data_after_init_timer_context.j0;
-j1 << match_function_and_data_after_init_timer_context.j1;
-j2 << match_function_and_data_after_init_timer_context.j2;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-coccilib.org.print_link(j2[0], "")
-
-@script:python r3_org depends on org@
-j0 << r3_context.j0;
-j1 << r3_context.j1;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-
-// ----------------------------------------------------------------------------
-
-@script:python match_immediate_function_data_after_init_timer_report
-depends on report@
-j0 << match_immediate_function_data_after_init_timer_context.j0;
-j1 << match_immediate_function_data_after_init_timer_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
-
-@script:python match_function_and_data_after_init_timer_report depends on report@
-j0 << match_function_and_data_after_init_timer_context.j0;
-j1 << match_function_and_data_after_init_timer_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
-
-@script:python r3_report depends on report@
-j0 << r3_context.j0;
-j1 << r3_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
index 873f444e7137f0fbbde99b683a382fbd93c3d472..be6f9f1abb343c07c42a696f60029d209a82df0d 100644 (file)
@@ -15,7 +15,7 @@ virtual context
 virtual org
 virtual report
 
-@r@
+@r exists@
 iterator name list_for_each_entry;
 expression x,E;
 position p1,p2;
index d0d00ef1f12ad0503113a1f0448ce75192f44506..ffe75407c5d2e44afbbe146347635c01f421609b 100644 (file)
@@ -3,10 +3,10 @@
 /// Sometimes, code after an if that is indented is actually intended to be
 /// part of the if branch.
 ///
-/// This has a high rate of false positives, because Coccinelle's column
-/// calculation does not distinguish between spaces and tabs, so code that
-/// is not visually aligned may be considered to be in the same column.
-///
+//# This has a high rate of false positives, because Coccinelle's column
+//# calculation does not distinguish between spaces and tabs, so code that
+//# is not visually aligned may be considered to be in the same column.
+//
 // Confidence: Low
 // Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
 // Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
index 81fabf37939033eae4224f9cf9a99320e6f765cc..08de5be73693d818912a390dd123049f3ff389b8 100644 (file)
@@ -14,7 +14,19 @@ virtual report
 virtual context
 
 @r@
-constant c;
+constant c,c1;
+identifier i,i1;
+position p;
+@@
+
+(
+ c1 + c - 1
+|
+ c1@i1 +@p c@i
+)
+
+@s@
+constant r.c, r.c1;
 identifier i;
 expression e;
 @@
@@ -27,28 +39,31 @@ e & c@i
 e |= c@i
 |
 e &= c@i
+|
+e | c1@i
+|
+e & c1@i
+|
+e |= c1@i
+|
+e &= c1@i
 )
 
-@s@
-constant r.c,c1;
-identifier i1;
-position p;
+@depends on s@
+position r.p;
+constant c1,c2;
 @@
 
-(
- c1 + c - 1
-|
-*c1@i1 +@p c
-)
+* c1 +@p c2
 
-@script:python depends on org@
-p << s.p;
+@script:python depends on s && org@
+p << r.p;
 @@
 
 cocci.print_main("sum of probable bitmasks, consider |",p)
 
-@script:python depends on report@
-p << s.p;
+@script:python depends on s && report@
+p << r.p;
 @@
 
 msg = "WARNING: sum of probable bitmasks, consider |"
index 5551da2b4fe32baaea72e0ec84c1a5b024756535..f597c8007b763b45e8b091c56c2f894521366a2b 100644 (file)
@@ -10,7 +10,7 @@
 // Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
 // Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
-// Comments: Requires Coccinelle version 1.0.0-rc20 or later
+// Requires: 1.0.0
 // Options:
 
 virtual patch
index 1f5ce959f5965b0249f70786e44f472581625955..39e07d8574dd787c2af71937852156abb9b1a7fb 100755 (executable)
 set -o errexit
 set -o nounset
 
+READELF="${CROSS_COMPILE}readelf"
+ADDR2LINE="${CROSS_COMPILE}addr2line"
+SIZE="${CROSS_COMPILE}size"
+NM="${CROSS_COMPILE}nm"
+
 command -v awk >/dev/null 2>&1 || die "awk isn't installed"
-command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
-command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
+command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
+command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
 
 usage() {
        echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
@@ -69,10 +76,10 @@ die() {
 find_dir_prefix() {
        local objfile=$1
 
-       local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+       local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
        [[ -z $start_kernel_addr ]] && return
 
-       local file_line=$(addr2line -e $objfile $start_kernel_addr)
+       local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
        [[ -z $file_line ]] && return
 
        local prefix=${file_line%init/main.c:*}
@@ -104,7 +111,7 @@ __faddr2line() {
 
        # Go through each of the object's symbols which match the func name.
        # In rare cases there might be duplicates.
-       file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
+       file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
        while read symbol; do
                local fields=($symbol)
                local sym_base=0x${fields[0]}
@@ -156,10 +163,10 @@ __faddr2line() {
 
                # pass real address to addr2line
                echo "$func+$offset/$sym_size:"
-               addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
+               ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
                DONE=1
 
-       done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
+       done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
 }
 
 [[ $# -lt 2 ]] && usage
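
With the tool names taken from $CROSS_COMPILE, the script can resolve addresses in a cross-built vmlinux; an illustrative invocation (prefix and symbol are examples only):

    CROSS_COMPILE=aarch64-linux-gnu- ./scripts/faddr2line vmlinux foo_handler+0x1c/0x90
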
index bc443201d3ef00ac2b197da0896a186891cff188..99c96e86eccb64e1ecc89e096b673d4c88059707 100755 (executable)
@@ -57,6 +57,7 @@ my $sections = 0;
 my $file_emails = 0;
 my $from_filename = 0;
 my $pattern_depth = 0;
+my $self_test = undef;
 my $version = 0;
 my $help = 0;
 my $find_maintainer_files = 0;
@@ -138,6 +139,7 @@ my %VCS_cmds_git = (
     "subject_pattern" => "^GitSubject: (.*)",
     "stat_pattern" => "^(\\d+)\\t(\\d+)\\t\$file\$",
     "file_exists_cmd" => "git ls-files \$file",
+    "list_files_cmd" => "git ls-files \$file",
 );
 
 my %VCS_cmds_hg = (
@@ -167,6 +169,7 @@ my %VCS_cmds_hg = (
     "subject_pattern" => "^HgSubject: (.*)",
     "stat_pattern" => "^(\\d+)\t(\\d+)\t\$file\$",
     "file_exists_cmd" => "hg files \$file",
+    "list_files_cmd" => "hg manifest -R \$file",
 );
 
 my $conf = which_conf(".get_maintainer.conf");
@@ -216,6 +219,14 @@ if (-f $ignore_file) {
     close($ignore);
 }
 
+if ($#ARGV > 0) {
+    foreach (@ARGV) {
+        if ($_ =~ /^-{1,2}self-test(?:=|$)/) {
+            die "$P: using --self-test does not allow any other option or argument\n";
+        }
+    }
+}
+
 if (!GetOptions(
                'email!' => \$email,
                'git!' => \$email_git,
@@ -252,6 +263,7 @@ if (!GetOptions(
                'fe|file-emails!' => \$file_emails,
                'f|file' => \$from_filename,
                'find-maintainer-files' => \$find_maintainer_files,
+               'self-test:s' => \$self_test,
                'v|version' => \$version,
                'h|help|usage' => \$help,
                )) {
@@ -268,6 +280,12 @@ if ($version != 0) {
     exit 0;
 }
 
+if (defined $self_test) {
+    read_all_maintainer_files();
+    self_test();
+    exit 0;
+}
+
 if (-t STDIN && !@ARGV) {
     # We're talking to a terminal, but have no command line arguments.
     die "$P: missing patchfile or -f file - use --help if necessary\n";
@@ -311,14 +329,17 @@ if (!top_of_kernel_tree($lk_path)) {
 my @typevalue = ();
 my %keyword_hash;
 my @mfiles = ();
+my @self_test_info = ();
 
 sub read_maintainer_file {
     my ($file) = @_;
 
     open (my $maint, '<', "$file")
        or die "$P: Can't open MAINTAINERS file '$file': $!\n";
+    my $i = 1;
     while (<$maint>) {
        my $line = $_;
+       chomp $line;
 
        if ($line =~ m/^([A-Z]):\s*(.*)/) {
            my $type = $1;
@@ -338,9 +359,12 @@ sub read_maintainer_file {
            }
            push(@typevalue, "$type:$value");
        } elsif (!(/^\s*$/ || /^\s*\#/)) {
-           $line =~ s/\n$//g;
            push(@typevalue, $line);
        }
+       if (defined $self_test) {
+           push(@self_test_info, {file=>$file, linenr=>$i, line=>$line});
+       }
+       $i++;
     }
     close($maint);
 }
@@ -357,26 +381,30 @@ sub find_ignore_git {
     return grep { $_ !~ /^\.git$/; } @_;
 }
 
-if (-d "${lk_path}MAINTAINERS") {
-    opendir(DIR, "${lk_path}MAINTAINERS") or die $!;
-    my @files = readdir(DIR);
-    closedir(DIR);
-    foreach my $file (@files) {
-       push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./);
+read_all_maintainer_files();
+
+sub read_all_maintainer_files {
+    if (-d "${lk_path}MAINTAINERS") {
+        opendir(DIR, "${lk_path}MAINTAINERS") or die $!;
+        my @files = readdir(DIR);
+        closedir(DIR);
+        foreach my $file (@files) {
+            push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./);
+        }
     }
-}
 
-if ($find_maintainer_files) {
-    find( { wanted => \&find_is_maintainer_file,
-           preprocess => \&find_ignore_git,
-           no_chdir => 1,
-       }, "${lk_path}");
-} else {
-    push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS";
-}
+    if ($find_maintainer_files) {
+        find( { wanted => \&find_is_maintainer_file,
+                preprocess => \&find_ignore_git,
+                no_chdir => 1,
+        }, "${lk_path}");
+    } else {
+        push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS";
+    }
 
-foreach my $file (@mfiles) {
-    read_maintainer_file("$file");
+    foreach my $file (@mfiles) {
+        read_maintainer_file("$file");
+    }
 }
 
 #
@@ -586,6 +614,135 @@ if ($web) {
 
 exit($exit);
 
+sub self_test {
+    my @lsfiles = ();
+    my @good_links = ();
+    my @bad_links = ();
+    my @section_headers = ();
+    my $index = 0;
+
+    @lsfiles = vcs_list_files($lk_path);
+
+    for my $x (@self_test_info) {
+       $index++;
+
+       ## Section header duplication and missing section content
+       if (($self_test eq "" || $self_test =~ /\bsections\b/) &&
+           $x->{line} =~ /^\S[^:]/ &&
+           defined $self_test_info[$index] &&
+           $self_test_info[$index]->{line} =~ /^([A-Z]):\s*\S/) {
+           my $has_S = 0;
+           my $has_F = 0;
+           my $has_ML = 0;
+           my $status = "";
+           if (grep(m@^\Q$x->{line}\E@, @section_headers)) {
+               print("$x->{file}:$x->{linenr}: warning: duplicate section header\t$x->{line}\n");
+           } else {
+               push(@section_headers, $x->{line});
+           }
+           my $nextline = $index;
+           while (defined $self_test_info[$nextline] &&
+                  $self_test_info[$nextline]->{line} =~ /^([A-Z]):\s*(\S.*)/) {
+               my $type = $1;
+               my $value = $2;
+               if ($type eq "S") {
+                   $has_S = 1;
+                   $status = $value;
+               } elsif ($type eq "F" || $type eq "N") {
+                   $has_F = 1;
+               } elsif ($type eq "M" || $type eq "R" || $type eq "L") {
+                   $has_ML = 1;
+               }
+               $nextline++;
+           }
+           if (!$has_ML && $status !~ /orphan|obsolete/i) {
+               print("$x->{file}:$x->{linenr}: warning: section without email address\t$x->{line}\n");
+           }
+           if (!$has_S) {
+               print("$x->{file}:$x->{linenr}: warning: section without status \t$x->{line}\n");
+           }
+           if (!$has_F) {
+               print("$x->{file}:$x->{linenr}: warning: section without file pattern\t$x->{line}\n");
+           }
+       }
+
+       next if ($x->{line} !~ /^([A-Z]):\s*(.*)/);
+
+       my $type = $1;
+       my $value = $2;
+
+       ## Filename pattern matching
+       if (($type eq "F" || $type eq "X") &&
+           ($self_test eq "" || $self_test =~ /\bpatterns\b/)) {
+           $value =~ s@\.@\\\.@g;       ##Convert . to \.
+           $value =~ s/\*/\.\*/g;       ##Convert * to .*
+           $value =~ s/\?/\./g;         ##Convert ? to .
+           ##if pattern is a directory and it lacks a trailing slash, add one
+           if ((-d $value)) {
+               $value =~ s@([^/])$@$1/@;
+           }
+           if (!grep(m@^$value@, @lsfiles)) {
+               print("$x->{file}:$x->{linenr}: warning: no file matches\t$x->{line}\n");
+           }
+
+       ## Link reachability
+       } elsif (($type eq "W" || $type eq "Q" || $type eq "B") &&
+                $value =~ /^https?:/ &&
+                ($self_test eq "" || $self_test =~ /\blinks\b/)) {
+           next if (grep(m@^\Q$value\E$@, @good_links));
+           my $isbad = 0;
+           if (grep(m@^\Q$value\E$@, @bad_links)) {
+               $isbad = 1;
+           } else {
+               my $output = `wget --spider -q --no-check-certificate --timeout 10 --tries 1 $value`;
+               if ($? == 0) {
+                   push(@good_links, $value);
+               } else {
+                   push(@bad_links, $value);
+                   $isbad = 1;
+               }
+           }
+           if ($isbad) {
+               print("$x->{file}:$x->{linenr}: warning: possible bad link\t$x->{line}\n");
+           }
+
+       ## SCM reachability
+       } elsif ($type eq "T" &&
+                ($self_test eq "" || $self_test =~ /\bscm\b/)) {
+           next if (grep(m@^\Q$value\E$@, @good_links));
+           my $isbad = 0;
+           if (grep(m@^\Q$value\E$@, @bad_links)) {
+               $isbad = 1;
+            } elsif ($value !~ /^(?:git|quilt|hg)\s+\S/) {
+               print("$x->{file}:$x->{linenr}: warning: malformed entry\t$x->{line}\n");
+           } elsif ($value =~ /^git\s+(\S+)(\s+([^\(]+\S+))?/) {
+               my $url = $1;
+               my $branch = "";
+               $branch = $3 if $3;
+               my $output = `git ls-remote --exit-code -h "$url" $branch > /dev/null 2>&1`;
+               if ($? == 0) {
+                   push(@good_links, $value);
+               } else {
+                   push(@bad_links, $value);
+                   $isbad = 1;
+               }
+           } elsif ($value =~ /^(?:quilt|hg)\s+(https?:\S+)/) {
+               my $url = $1;
+               my $output = `wget --spider -q --no-check-certificate --timeout 10 --tries 1 $url`;
+               if ($? == 0) {
+                   push(@good_links, $value);
+               } else {
+                   push(@bad_links, $value);
+                   $isbad = 1;
+               }
+           }
+           if ($isbad) {
+               print("$x->{file}:$x->{linenr}: warning: possible bad link\t$x->{line}\n");
+           }
+       }
+    }
+}
+
 sub ignore_email_address {
     my ($address) = @_;
 
@@ -863,6 +1020,7 @@ Other options:
   --sections => print all of the subsystem sections with pattern matches
   --letters => print all matching 'letter' types from all matching sections
   --mailmap => use .mailmap file (default: $email_use_mailmap)
+  --self-test => show potential issues with MAINTAINERS file content
   --version => show version
   --help => show this help information
 
@@ -2192,6 +2350,23 @@ sub vcs_file_exists {
     return $exists;
 }
 
+sub vcs_list_files {
+    my ($file) = @_;
+
+    my @lsfiles = ();
+
+    my $vcs_used = vcs_exists();
+    return 0 if (!$vcs_used);
+
+    my $cmd = $VCS_cmds{"list_files_cmd"};
+    $cmd =~ s/(\$\w+)/$1/eeg;   # interpolate $cmd
+    @lsfiles = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+    return () if ($? != 0);
+
+    return @lsfiles;
+}
+
 sub uniq {
     my (@parms) = @_;
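
An illustrative use of the new mode (run from the top of the source tree; --self-test must be the only argument):

    ./scripts/get_maintainer.pl --self-test              # run every check
    ./scripts/get_maintainer.pl --self-test=patterns     # only the F:/X: file-pattern checks
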
 
index 20136ffefb23b814fa8b300b70a07b821b3ec71f..3c8bd9bb4267a874cd1fa112d4a21b569bd56296 100644 (file)
@@ -1061,7 +1061,7 @@ struct symbol **sym_re_search(const char *pattern)
        }
        if (sym_match_arr) {
                qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp);
-               sym_arr = malloc((cnt+1) * sizeof(struct symbol));
+               sym_arr = malloc((cnt+1) * sizeof(struct symbol *));
                if (!sym_arr)
                        goto sym_re_search_free;
                for (i = 0; i < cnt; i++)
index 7bd52b8f63d48667cdb1d8ebe63896f2d8e3d176..bd29a92b4b48aa1648f8c892c980f1a72c2ad67e 100755 (executable)
@@ -58,6 +58,7 @@ Output format selection (mutually exclusive):
   -man                 Output troff manual page format. This is the default.
   -rst                 Output reStructuredText format.
   -text                        Output plain text format.
+  -none                        Do not output documentation, only warnings.
 
 Output selection (mutually exclusive):
   -export              Only output documentation for symbols that have been
@@ -532,6 +533,8 @@ while ($ARGV[0] =~ m/^-(.*)/) {
        $output_mode = "gnome";
        @highlights = @highlights_gnome;
        $blankline = $blankline_gnome;
+    } elsif ($cmd eq "-none") {
+       $output_mode = "none";
     } elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document
        $modulename = shift @ARGV;
     } elsif ($cmd eq "-function") { # to only output specific functions
@@ -2117,6 +2120,24 @@ sub output_blockhead_list(%) {
     }
 }
 
+
+## none mode output functions
+
+sub output_function_none(%) {
+}
+
+sub output_enum_none(%) {
+}
+
+sub output_typedef_none(%) {
+}
+
+sub output_struct_none(%) {
+}
+
+sub output_blockhead_none(%) {
+}
+
 ##
 # generic output function for all types (function, struct/union, typedef, enum);
 # calls the generated, variable output_ function name based on
@@ -3143,7 +3164,9 @@ sub process_file($) {
        }
     }
     if ($initial_section_counter == $section_counter) {
-       print STDERR "${file}:1: warning: no structured comments found\n";
+       if ($output_mode ne "none") {
+           print STDERR "${file}:1: warning: no structured comments found\n";
+       }
        if (($output_selection == OUTPUT_INCLUDE) && ($show_not_found == 1)) {
            print STDERR "    Was looking for '$_'.\n" for keys %function_table;
        }
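
The new mode parses the file and reports kernel-doc warnings without emitting any documentation, which is what the Makefile.build hook above relies on; an illustrative standalone run (path is an example):

    ./scripts/kernel-doc -none include/linux/foo.h
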
index e6818b8e7141e6b9e021d8e781cb94b09293ef1a..c0d129d7f4304abfac7f2e7d699000df02678f73 100755 (executable)
@@ -188,10 +188,8 @@ sortextable()
 # Delete output files in case of error
 cleanup()
 {
-       rm -f .old_version
        rm -f .tmp_System.map
        rm -f .tmp_kallsyms*
-       rm -f .tmp_version
        rm -f .tmp_vmlinux*
        rm -f built-in.o
        rm -f System.map
@@ -239,12 +237,12 @@ esac
 
 # Update version
 info GEN .version
-if [ -r .version ]; then
-       rm -f .version;
-       echo 1 >.version;
+if [ -r .version ]; then
+       VERSION=$(expr 0$(cat .version) + 1)
+       echo $VERSION > .version
 else
-       mv .version .old_version;
-       expr 0$(cat .old_version) + 1 >.version;
+       rm -f .version
+       echo 1 > .version
 fi;
 
 # final build of init/
@@ -332,6 +330,3 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then
                exit 1
        fi
 fi
-
-# We made a new kernel - delete old version file
-rm -f .old_version
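
A standalone illustration of the simplified .version bump, now that the .old_version/.tmp_version juggling is gone:

    echo 5 > .version
    VERSION=$(expr 0$(cat .version) + 1)
    echo $VERSION > .version    # .version now reads 6; without the file, the script writes 1
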
index 959199c3147ec79e2797e0c87cf47b0e03723a25..87f1fc9801d7330e86bfd39f9077c899ba8589cc 100755 (executable)
@@ -28,12 +28,7 @@ LC_ALL=C
 export LC_ALL
 
 if [ -z "$KBUILD_BUILD_VERSION" ]; then
-       if [ -r .version ]; then
-               VERSION=`cat .version`
-       else
-               VERSION=0
-               echo 0 > .version
-       fi
+       VERSION=$(cat .version 2>/dev/null || echo 1)
 else
        VERSION=$KBUILD_BUILD_VERSION
 fi
index 73f9f3192b9fbf8770e6ef632723d23e8efb5b63..c23534925b38173d8d5adeeee99b298bd6053960 100644 (file)
@@ -39,28 +39,28 @@ if test "$(objtree)" != "$(srctree)"; then \
        false; \
 fi ; \
 $(srctree)/scripts/setlocalversion --save-scmversion; \
-ln -sf $(srctree) $(2); \
 tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
-       $(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
-rm -f $(2) $(objtree)/.scmversion
+       --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
+rm -f $(objtree)/.scmversion
 
 # rpm-pkg
 # ---------------------------------------------------------------------------
-rpm-pkg rpm: FORCE
+rpm-pkg: FORCE
        $(MAKE) clean
        $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
        $(call cmd,src_tar,$(KERNELPATH),kernel.spec)
-       rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
-       rm $(KERNELPATH).tar.gz kernel.spec
+       +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz \
+       --define='_smp_mflags %{nil}'
 
 # binrpm-pkg
 # ---------------------------------------------------------------------------
 binrpm-pkg: FORCE
        $(MAKE) KBUILD_SRC=
        $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec
-       rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
+       +rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
                $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
-       rm binkernel.spec
+
+clean-files += $(objtree)/*.spec
 
 # Deb target
 # ---------------------------------------------------------------------------
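
The src_tar rule above now relies on GNU tar's --transform instead of a temporary symlink in the source tree; a minimal sketch of the same invocation (archive name and contents illustrative):

    tar -cz -f kernel-4.15.tar.gz --transform 's:^:kernel-4.15/:S' Makefile scripts/
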
index 0bc87473f68f817b81640591f75f18c3b068351c..b4f0f2b3f8d2c6d495653c4fa81cbddbe8edbca8 100755 (executable)
@@ -408,9 +408,9 @@ EOF
        dpkg-source -cdebian/control -ldebian/changelog --format="3.0 (custom)" --target-format="3.0 (quilt)" \
                -b / ../${sourcename}_${version}.orig.tar.gz  ../${sourcename}_${packageversion}.debian.tar.gz
        mv ${sourcename}_${packageversion}*dsc ..
-       dpkg-genchanges > ../${sourcename}_${packageversion}_${debarch}.changes
+       dpkg-genchanges -Vkernel:debarch="${debarch}" > ../${sourcename}_${packageversion}_${debarch}.changes
 else
-       dpkg-genchanges -b > ../${sourcename}_${packageversion}_${debarch}.changes
+       dpkg-genchanges -b -Vkernel:debarch="${debarch}" > ../${sourcename}_${packageversion}_${debarch}.changes
 fi
 
 exit 0
index f47f17aae135188eae37db2178db6c9911a80023..280027fad991c1beeacc64fcc460eb6f6986d849 100755 (executable)
 #
 
 # how we were called determines which rpms we build and how we build them
-if [ "$1" = "prebuilt" ]; then
-       PREBUILT=true
+if [ "$1" = prebuilt ]; then
+       S=DEL
 else
-       PREBUILT=false
+       S=
 fi
 
-# starting to output the spec
-if [ "`grep CONFIG_DRM=y .config | cut -f2 -d\=`" = "y" ]; then
-       PROVIDES=kernel-drm
-fi
-
-PROVIDES="$PROVIDES kernel-$KERNELRELEASE"
-__KERNELRELEASE=`echo $KERNELRELEASE | sed -e "s/-/_/g"`
-
-echo "Name: kernel"
-echo "Summary: The Linux Kernel"
-echo "Version: $__KERNELRELEASE"
-echo "Release: $(cat .version 2>/dev/null || echo 1)"
-echo "License: GPL"
-echo "Group: System Environment/Kernel"
-echo "Vendor: The Linux Community"
-echo "URL: http://www.kernel.org"
-
-if ! $PREBUILT; then
-echo "Source: kernel-$__KERNELRELEASE.tar.gz"
-fi
-
-echo "BuildRoot: %{_tmppath}/%{name}-%{PACKAGE_VERSION}-root"
-echo "Provides: $PROVIDES"
-echo "%define __spec_install_post /usr/lib/rpm/brp-compress || :"
-echo "%define debug_package %{nil}"
-echo ""
-echo "%description"
-echo "The Linux Kernel, the operating system core itself"
-echo ""
-echo "%package headers"
-echo "Summary: Header files for the Linux kernel for use by glibc"
-echo "Group: Development/System"
-echo "Obsoletes: kernel-headers"
-echo "Provides: kernel-headers = %{version}"
-echo "%description headers"
-echo "Kernel-headers includes the C header files that specify the interface"
-echo "between the Linux kernel and userspace libraries and programs.  The"
-echo "header files define structures and constants that are needed for"
-echo "building most standard programs and are also needed for rebuilding the"
-echo "glibc package."
-echo ""
-echo "%package devel"
-echo "Summary: Development package for building kernel modules to match the $__KERNELRELEASE kernel"
-echo "Group: System Environment/Kernel"
-echo "AutoReqProv: no"
-echo "%description -n kernel-devel"
-echo "This package provides kernel headers and makefiles sufficient to build modules"
-echo "against the $__KERNELRELEASE kernel package."
-echo ""
-
-if ! $PREBUILT; then
-echo "%prep"
-echo "%setup -q"
-echo ""
+if grep -q CONFIG_MODULES=y .config; then
+       M=
+else
+       M=DEL
 fi
 
-echo "%build"
-
-if ! $PREBUILT; then
-echo "make clean && make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}"
-echo ""
+if grep -q CONFIG_DRM=y .config; then
+       PROVIDES=kernel-drm
 fi
 
-echo "%install"
-echo 'KBUILD_IMAGE=$(make image_name)'
-echo "%ifarch ia64"
-echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules'
-echo "%else"
-echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
-echo "%endif"
-
-echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
-echo "%ifarch ia64"
-echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
-echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
-echo "%else"
-echo "%ifarch ppc64"
-echo "cp vmlinux arch/powerpc/boot"
-echo "cp arch/powerpc/boot/"'$KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
-echo "%else"
-echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
-echo "%endif"
-echo "%endif"
-
-echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr KBUILD_SRC= headers_install'
-echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
-
-echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE"
-
-echo "%ifnarch ppc64"
-echo 'bzip2 -9 --keep vmlinux'
-echo 'mv vmlinux.bz2 $RPM_BUILD_ROOT'"/boot/vmlinux-$KERNELRELEASE.bz2"
-echo "%endif"
-
-if ! $PREBUILT; then
-echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/build"
-echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/source"
-echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
-echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude .config.old --exclude .missing-syscalls.d\""
-echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)"
-echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
-echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
-echo "ln -sf /usr/src/kernels/$KERNELRELEASE source"
-fi
+PROVIDES="$PROVIDES kernel-$KERNELRELEASE"
+__KERNELRELEASE=$(echo $KERNELRELEASE | sed -e "s/-/_/g")
+EXCLUDES="$RCS_TAR_IGNORE --exclude=.tmp_versions --exclude=*vmlinux* \
+--exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation \
+--exclude=.config.old --exclude=.missing-syscalls.d"
 
-echo ""
-echo "%clean"
-echo 'rm -rf $RPM_BUILD_ROOT'
-echo ""
-echo "%post"
-echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
-echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
-echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
-echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
-echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
-echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
-echo "fi"
-echo ""
-echo "%preun"
-echo "if [ -x /sbin/new-kernel-pkg ]; then"
-echo "new-kernel-pkg --remove $KERNELRELEASE --rminitrd --initrdfile=/boot/initramfs-$KERNELRELEASE.img"
-echo "fi"
-echo ""
-echo "%postun"
-echo "if [ -x /sbin/update-bootloader ]; then"
-echo "/sbin/update-bootloader --remove $KERNELRELEASE"
-echo "fi"
-echo ""
-echo "%files"
-echo '%defattr (-, root, root)'
-echo "/lib/modules/$KERNELRELEASE"
-echo "%exclude /lib/modules/$KERNELRELEASE/build"
-echo "%exclude /lib/modules/$KERNELRELEASE/source"
-echo "/boot/*"
-echo ""
-echo "%files headers"
-echo '%defattr (-, root, root)'
-echo "/usr/include"
-echo ""
-if ! $PREBUILT; then
-echo "%files devel"
-echo '%defattr (-, root, root)'
-echo "/usr/src/kernels/$KERNELRELEASE"
-echo "/lib/modules/$KERNELRELEASE/build"
-echo "/lib/modules/$KERNELRELEASE/source"
-echo ""
-fi
+# We can label the here-doc lines for conditional output to the spec file
+#
+# Labels:
+#  $S: this line is enabled only when building source package
+#  $M: this line is enabled only when CONFIG_MODULES is enabled
+sed -e '/^DEL/d' -e 's/^\t*//' <<EOF
+       Name: kernel
+       Summary: The Linux Kernel
+       Version: $__KERNELRELEASE
+       Release: $(cat .version 2>/dev/null || echo 1)
+       License: GPL
+       Group: System Environment/Kernel
+       Vendor: The Linux Community
+       URL: http://www.kernel.org
+$S     Source: kernel-$__KERNELRELEASE.tar.gz
+       Provides: $PROVIDES
+       %define __spec_install_post /usr/lib/rpm/brp-compress || :
+       %define debug_package %{nil}
+
+       %description
+       The Linux Kernel, the operating system core itself
+
+       %package headers
+       Summary: Header files for the Linux kernel for use by glibc
+       Group: Development/System
+       Obsoletes: kernel-headers
+       Provides: kernel-headers = %{version}
+       %description headers
+       Kernel-headers includes the C header files that specify the interface
+       between the Linux kernel and userspace libraries and programs.  The
+       header files define structures and constants that are needed for
+       building most standard programs and are also needed for rebuilding the
+       glibc package.
+
+$S$M   %package devel
+$S$M   Summary: Development package for building kernel modules to match the $__KERNELRELEASE kernel
+$S$M   Group: System Environment/Kernel
+$S$M   AutoReqProv: no
+$S$M   %description -n kernel-devel
+$S$M   This package provides kernel headers and makefiles sufficient to build modules
+$S$M   against the $__KERNELRELEASE kernel package.
+$S$M
+$S     %prep
+$S     %setup -q
+$S
+$S     %build
+$S     make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
+$S
+       %install
+       mkdir -p %{buildroot}/boot
+       %ifarch ia64
+       mkdir -p %{buildroot}/boot/efi
+       cp \$(make image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
+       ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/
+       %else
+       cp \$(make image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
+       %endif
+$M     make %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} KBUILD_SRC= modules_install
+       make %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr KBUILD_SRC= headers_install
+       cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE
+       cp .config %{buildroot}/boot/config-$KERNELRELEASE
+       bzip2 -9 --keep vmlinux
+       mv vmlinux.bz2 %{buildroot}/boot/vmlinux-$KERNELRELEASE.bz2
+$S$M   rm -f %{buildroot}/lib/modules/$KERNELRELEASE/build
+$S$M   rm -f %{buildroot}/lib/modules/$KERNELRELEASE/source
+$S$M   mkdir -p %{buildroot}/usr/src/kernels/$KERNELRELEASE
+$S$M   tar cf - . $EXCLUDES | tar xf - -C %{buildroot}/usr/src/kernels/$KERNELRELEASE
+$S$M   cd %{buildroot}/lib/modules/$KERNELRELEASE
+$S$M   ln -sf /usr/src/kernels/$KERNELRELEASE build
+$S$M   ln -sf /usr/src/kernels/$KERNELRELEASE source
+
+       %clean
+       rm -rf %{buildroot}
+
+       %post
+       if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then
+       cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm
+       cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm
+       rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE
+       /sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm
+       rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm
+       fi
+
+       %preun
+       if [ -x /sbin/new-kernel-pkg ]; then
+       new-kernel-pkg --remove $KERNELRELEASE --rminitrd --initrdfile=/boot/initramfs-$KERNELRELEASE.img
+       fi
+
+       %postun
+       if [ -x /sbin/update-bootloader ]; then
+       /sbin/update-bootloader --remove $KERNELRELEASE
+       fi
+
+       %files
+       %defattr (-, root, root)
+$M     /lib/modules/$KERNELRELEASE
+$M     %exclude /lib/modules/$KERNELRELEASE/build
+$M     %exclude /lib/modules/$KERNELRELEASE/source
+       /boot/*
+
+       %files headers
+       %defattr (-, root, root)
+       /usr/include
+$S$M
+$S$M   %files devel
+$S$M   %defattr (-, root, root)
+$S$M   /usr/src/kernels/$KERNELRELEASE
+$S$M   /lib/modules/$KERNELRELEASE/build
+$S$M   /lib/modules/$KERNELRELEASE/source
+EOF
index 5dbd2faa24494910d75c99c4f054b89715d51ae2..255cef1b098d8ffdf82caf5022a965175f9f2685 100644 (file)
@@ -2,9 +2,44 @@
 # SPDX-License-Identifier: GPL-2.0
 
 use strict;
+use Getopt::Long qw(:config no_auto_abbrev);
+
+my $input_file = "MAINTAINERS";
+my $output_file = "MAINTAINERS.new";
+my $output_section = "SECTION.new";
+my $help = 0;
 
 my $P = $0;
 
+if (!GetOptions(
+               'input=s' => \$input_file,
+               'output=s' => \$output_file,
+               'section=s' => \$output_section,
+               'h|help|usage' => \$help,
+           )) {
+    die "$P: invalid argument - use --help if necessary\n";
+}
+
+if ($help != 0) {
+    usage();
+    exit 0;
+}
+
+sub usage {
+    print <<EOT;
+usage: $P [options] <pattern matching regexes>
+
+  --input => MAINTAINERS file to read (default: MAINTAINERS)
+  --output => sorted MAINTAINERS file to write (default: MAINTAINERS.new)
+  --section => new sorted MAINTAINERS file to write to (default: SECTION.new)
+
+If <pattern matching regexes> exist, then the sections that match the
+regexes are not written to the output file but are written to the
+section file.
+
+EOT
+}
+
 # sort comparison functions
 sub by_category($$) {
     my ($a, $b) = @_;
@@ -56,13 +91,20 @@ sub trim {
 sub alpha_output {
     my ($hashref, $filename) = (@_);
 
+    return if ! scalar(keys %$hashref);
+
     open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n";
+    my $separator;
     foreach my $key (sort by_category keys %$hashref) {
        if ($key eq " ") {
-           chomp $$hashref{$key};
            print $file $$hashref{$key};
        } else {
-           print $file "\n" . $key . "\n";
+           if (! defined $separator) {
+               $separator = "\n";
+           } else {
+               print $file $separator;
+           }
+           print $file $key . "\n";
            foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {
                print $file ($pattern . "\n");
            }
@@ -112,7 +154,7 @@ sub file_input {
 my %hash;
 my %new_hash;
 
-file_input(\%hash, "MAINTAINERS");
+file_input(\%hash, $input_file);
 
 foreach my $type (@ARGV) {
     foreach my $key (keys %hash) {
@@ -123,7 +165,7 @@ foreach my $type (@ARGV) {
     }
 }
 
-alpha_output(\%hash, "MAINTAINERS.new");
-alpha_output(\%new_hash, "SECTION.new");
+alpha_output(\%hash, $output_file);
+alpha_output(\%new_hash, $output_section);
 
 exit(0);
index e8049da1831fa9bf5ed0f049792e60c70162c5f2..b3048b894a3976d59764c8c8291773ec4fe4a4f9 100644 (file)
@@ -1,2 +1 @@
 subdir-y := mdp genheaders
-subdir-        += mdp genheaders
index aa0cc49ad1adc5d07ee4e0f9608809ce2e2de11a..9a058cff49d4a1d8fec6c6151cedffffc03d8232 100644 (file)
@@ -1187,6 +1187,10 @@ unknonw||unknown
 unknow||unknown
 unkown||unknown
 unneded||unneeded
+unneccecary||unnecessary
+unneccesary||unnecessary
+unneccessary||unnecessary
+unnecesary||unnecessary
 unneedingly||unnecessarily
 unnsupported||unsupported
 unmached||unmatched
index caaf51dda64812067e07d1c9079c7750c4296e11..d4fa04d914395393289eaea5c26b344d7668325c 100644 (file)
@@ -533,7 +533,7 @@ static ssize_t ns_revision_read(struct file *file, char __user *buf,
        long last_read;
        int avail;
 
-       mutex_lock(&rev->ns->lock);
+       mutex_lock_nested(&rev->ns->lock, rev->ns->level);
        last_read = rev->last_read;
        if (last_read == rev->ns->revision) {
                mutex_unlock(&rev->ns->lock);
@@ -543,7 +543,7 @@ static ssize_t ns_revision_read(struct file *file, char __user *buf,
                                             last_read !=
                                             READ_ONCE(rev->ns->revision)))
                        return -ERESTARTSYS;
-               mutex_lock(&rev->ns->lock);
+               mutex_lock_nested(&rev->ns->lock, rev->ns->level);
        }
 
        avail = sprintf(buffer, "%ld\n", rev->ns->revision);
@@ -577,7 +577,7 @@ static unsigned int ns_revision_poll(struct file *file, poll_table *pt)
        unsigned int mask = 0;
 
        if (rev) {
-               mutex_lock(&rev->ns->lock);
+               mutex_lock_nested(&rev->ns->lock, rev->ns->level);
                poll_wait(file, &rev->ns->wait, pt);
                if (rev->last_read < rev->ns->revision)
                        mask |= POLLIN | POLLRDNORM;
@@ -1643,7 +1643,7 @@ static int ns_mkdir_op(struct inode *dir, struct dentry *dentry, umode_t mode)
         */
        inode_unlock(dir);
        error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
-       mutex_lock(&parent->lock);
+       mutex_lock_nested(&parent->lock, parent->level);
        inode_lock_nested(dir, I_MUTEX_PARENT);
        if (error)
                goto out;
@@ -1692,7 +1692,7 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
        inode_unlock(dir);
        inode_unlock(dentry->d_inode);
 
-       mutex_lock(&parent->lock);
+       mutex_lock_nested(&parent->lock, parent->level);
        ns = aa_get_ns(__aa_findn_ns(&parent->sub_ns, dentry->d_name.name,
                                     dentry->d_name.len));
        if (!ns) {
@@ -1747,7 +1747,7 @@ void __aafs_ns_rmdir(struct aa_ns *ns)
                __aafs_profile_rmdir(child);
 
        list_for_each_entry(sub, &ns->sub_ns, base.list) {
-               mutex_lock(&sub->lock);
+               mutex_lock_nested(&sub->lock, sub->level);
                __aafs_ns_rmdir(sub);
                mutex_unlock(&sub->lock);
        }
@@ -1877,7 +1877,7 @@ int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name,
 
        /* subnamespaces */
        list_for_each_entry(sub, &ns->sub_ns, base.list) {
-               mutex_lock(&sub->lock);
+               mutex_lock_nested(&sub->lock, sub->level);
                error = __aafs_ns_mkdir(sub, ns_subns_dir(ns), NULL, NULL);
                mutex_unlock(&sub->lock);
                if (error)
@@ -1921,7 +1921,7 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns)
        /* is next namespace a child */
        if (!list_empty(&ns->sub_ns)) {
                next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
-               mutex_lock(&next->lock);
+               mutex_lock_nested(&next->lock, next->level);
                return next;
        }
 
@@ -1931,7 +1931,7 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns)
                mutex_unlock(&ns->lock);
                next = list_next_entry(ns, base.list);
                if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
-                       mutex_lock(&next->lock);
+                       mutex_lock_nested(&next->lock, next->level);
                        return next;
                }
                ns = parent;
@@ -2039,7 +2039,7 @@ static void *p_start(struct seq_file *f, loff_t *pos)
        f->private = root;
 
        /* find the first profile */
-       mutex_lock(&root->lock);
+       mutex_lock_nested(&root->lock, root->level);
        profile = __first_profile(root, root);
 
        /* skip to position */
@@ -2451,7 +2451,7 @@ static int __init aa_create_aafs(void)
        aafs_mnt = kern_mount(&aafs_ops);
        if (IS_ERR(aafs_mnt))
                panic("can't set apparmorfs up\n");
-       aafs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+       aafs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
 
        /* Populate fs tree. */
        error = entry_create_dir(&aa_sfs_entry, NULL);
@@ -2491,7 +2491,7 @@ static int __init aa_create_aafs(void)
        ns_subrevision(root_ns) = dent;
 
        /* policy tree referenced by magic policy symlink */
-       mutex_lock(&root_ns->lock);
+       mutex_lock_nested(&root_ns->lock, root_ns->level);
        error = __aafs_ns_mkdir(root_ns, aafs_mnt->mnt_root, ".policy",
                                aafs_mnt->mnt_root);
        mutex_unlock(&root_ns->lock);
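
The mutex_lock_nested() conversions in these AppArmor hunks all follow one idea: every policy namespace has its own mutex, but the mutexes share a single lock class, and the code takes a child namespace's lock while the parent's is already held. Plain mutex_lock() makes lockdep report that as recursive locking, so the namespace's depth in the tree (ns->level) is passed as the lockdep subclass to mark the two acquisitions as different levels. A minimal sketch of the pattern, using a hypothetical node type in place of struct aa_ns (lockdep only distinguishes a small, fixed number of subclasses, so very deep trees need care):

#include <linux/mutex.h>

struct node {
        struct mutex lock;
        unsigned int level;     /* depth in the tree, root == 0 */
};

/*
 * Take a child's lock while the parent's lock is held.  Using the depth as
 * the subclass keeps the two acquisitions in distinct lockdep subclasses,
 * so parent->lock -> child->lock is not flagged on the shared lock class.
 */
static void lock_parent_then_child(struct node *parent, struct node *child)
{
        mutex_lock_nested(&parent->lock, parent->level);
        mutex_lock_nested(&child->lock, child->level);  /* == parent->level + 1 */

        /* ... operate on both nodes ... */

        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);
}
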
index dd754b7850a82b4d129c11de0c55603de19268ba..04ba9d0718ea590b7c5033cfc4b952c701acb54d 100644 (file)
@@ -305,6 +305,7 @@ static int change_profile_perms(struct aa_profile *profile,
  * __attach_match_ - find an attachment match
  * @name - to match against  (NOT NULL)
  * @head - profile list to walk  (NOT NULL)
+ * @info - info message if there was an error (NOT NULL)
  *
  * Do a linear search on the profiles in the list.  There is a matching
  * preference where an exact match is preferred over a name which uses
@@ -316,28 +317,46 @@ static int change_profile_perms(struct aa_profile *profile,
  * Returns: profile or NULL if no match found
  */
 static struct aa_profile *__attach_match(const char *name,
-                                        struct list_head *head)
+                                        struct list_head *head,
+                                        const char **info)
 {
        int len = 0;
+       bool conflict = false;
        struct aa_profile *profile, *candidate = NULL;
 
        list_for_each_entry_rcu(profile, head, base.list) {
-               if (profile->label.flags & FLAG_NULL)
+               if (profile->label.flags & FLAG_NULL &&
+                   &profile->label == ns_unconfined(profile->ns))
                        continue;
-               if (profile->xmatch && profile->xmatch_len > len) {
-                       unsigned int state = aa_dfa_match(profile->xmatch,
-                                                         DFA_START, name);
-                       u32 perm = dfa_user_allow(profile->xmatch, state);
-                       /* any accepting state means a valid match. */
-                       if (perm & MAY_EXEC) {
-                               candidate = profile;
-                               len = profile->xmatch_len;
+
+               if (profile->xmatch) {
+                       if (profile->xmatch_len == len) {
+                               conflict = true;
+                               continue;
+                       } else if (profile->xmatch_len > len) {
+                               unsigned int state;
+                               u32 perm;
+
+                               state = aa_dfa_match(profile->xmatch,
+                                                    DFA_START, name);
+                               perm = dfa_user_allow(profile->xmatch, state);
+                               /* any accepting state means a valid match. */
+                               if (perm & MAY_EXEC) {
+                                       candidate = profile;
+                                       len = profile->xmatch_len;
+                                       conflict = false;
+                               }
                        }
                } else if (!strcmp(profile->base.name, name))
                        /* exact non-re match, no more searching required */
                        return profile;
        }
 
+       if (conflict) {
+               *info = "conflicting profile attachments";
+               return NULL;
+       }
+
        return candidate;
 }
 
@@ -346,16 +365,17 @@ static struct aa_profile *__attach_match(const char *name,
  * @ns: the current namespace  (NOT NULL)
  * @list: list to search  (NOT NULL)
  * @name: the executable name to match against  (NOT NULL)
+ * @info: info message if there was an error
  *
  * Returns: label or NULL if no match found
  */
 static struct aa_label *find_attach(struct aa_ns *ns, struct list_head *list,
-                                   const char *name)
+                                   const char *name, const char **info)
 {
        struct aa_profile *profile;
 
        rcu_read_lock();
-       profile = aa_get_profile(__attach_match(name, list));
+       profile = aa_get_profile(__attach_match(name, list, info));
        rcu_read_unlock();
 
        return profile ? &profile->label : NULL;
@@ -448,11 +468,11 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
                if (xindex & AA_X_CHILD)
                        /* released by caller */
                        new = find_attach(ns, &profile->base.profiles,
-                                               name);
+                                         name, info);
                else
                        /* released by caller */
                        new = find_attach(ns, &ns->base.profiles,
-                                               name);
+                                         name, info);
                *lookupname = name;
                break;
        }
@@ -516,7 +536,7 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
 
        if (profile_unconfined(profile)) {
                new = find_attach(profile->ns, &profile->ns->base.profiles,
-                                 name);
+                                 name, &info);
                if (new) {
                        AA_DEBUG("unconfined attached to new label");
                        return new;
@@ -541,9 +561,21 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
                }
        } else if (COMPLAIN_MODE(profile)) {
                /* no exec permission - learning mode */
-               struct aa_profile *new_profile = aa_new_null_profile(profile,
-                                                             false, name,
-                                                             GFP_ATOMIC);
+               struct aa_profile *new_profile = NULL;
+               char *n = kstrdup(name, GFP_ATOMIC);
+
+               if (n) {
+                       /* name is ptr into buffer */
+                       long pos = name - buffer;
+                       /* break per cpu buffer hold */
+                       put_buffers(buffer);
+                       new_profile = aa_new_null_profile(profile, false, n,
+                                                         GFP_KERNEL);
+                       get_buffers(buffer);
+                       name = buffer + pos;
+                       strcpy((char *)name, n);
+                       kfree(n);
+               }
                if (!new_profile) {
                        error = -ENOMEM;
                        info = "could not create null profile";
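
The learning-mode hunk above has one non-obvious constraint: name points into a per-CPU buffer pinned by get_buffers(), and creating the null profile with GFP_KERNEL may sleep, so the buffer is released around the allocation and the string is copied back to the same offset afterwards. A rough sketch of that save-offset / release / reacquire dance; acquire_buf(), release_buf() and do_sleeping_work() are hypothetical stand-ins for the AppArmor helpers:

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helpers standing in for get_buffers()/put_buffers() etc. */
extern char *acquire_buf(void);
extern void release_buf(char *buf);
extern void do_sleeping_work(const char *name);

static void sleep_with_buffer_string(long name_offset)
{
        char *buf = acquire_buf();              /* pins a per-CPU buffer */
        const char *name = buf + name_offset;   /* name points INTO buf */
        char *copy = kstrdup(name, GFP_ATOMIC); /* no sleeping while pinned */

        if (copy) {
                long pos = name - buf;          /* remember where it lived */

                release_buf(buf);               /* sleeping is allowed again */
                do_sleeping_work(copy);         /* e.g. a GFP_KERNEL allocation */
                buf = acquire_buf();            /* contents may have changed */
                name = buf + pos;
                strcpy(buf + pos, copy);        /* put the string back */
                kfree(copy);
        }
        release_buf(buf);
}
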
index 3382518b87fa507200679cb9ef660329a292debc..e79bf44396a36f60dde2e17fc68f2d0ac7d914b9 100644 (file)
@@ -226,18 +226,12 @@ static u32 map_old_perms(u32 old)
 struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state,
                                  struct path_cond *cond)
 {
-       struct aa_perms perms;
-
        /* FIXME: change over to new dfa format
         * currently file perms are encoded in the dfa, new format
         * splits the permissions from the dfa.  This mapping can be
         * done at profile load
         */
-       perms.deny = 0;
-       perms.kill = perms.stop = 0;
-       perms.complain = perms.cond = 0;
-       perms.hide = 0;
-       perms.prompt = 0;
+       struct aa_perms perms = { };
 
        if (uid_eq(current_fsuid(), cond->uid)) {
                perms.allow = map_old_perms(dfa_user_allow(dfa, state));
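
Several hunks in this series replace field-by-field zeroing of struct aa_perms with a single initializer. The C rule being leaned on is that members not mentioned in an initializer list are zero-initialized, so { } (a GNU/C23 spelling; { 0 } is the strictly portable form) or a designated initializer naming only a few fields stands in for a run of explicit assignments. A small standalone illustration:

#include <stdio.h>

struct perms {
        unsigned int allow;
        unsigned int audit;
        unsigned int quiet;
        unsigned int deny;
        unsigned int kill;
};

int main(void)
{
        struct perms all_zero = { };    /* every member is zero */
        struct perms some = {
                .allow = 0x4,
                .audit = 0x2,           /* quiet, deny, kill are zero */
        };

        printf("%u %u %u\n", all_zero.deny, some.quiet, some.allow);
        return 0;
}
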
index f546707a2bbbe1c96a52c215baf492d70e23c00a..6505e1ad9e230605885f20f1e6f8df2029866cf2 100644 (file)
@@ -86,7 +86,7 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
 
 static inline bool path_mediated_fs(struct dentry *dentry)
 {
-       return !(dentry->d_sb->s_flags & MS_NOUSER);
+       return !(dentry->d_sb->s_flags & SB_NOUSER);
 }
 
 
index ad28e03a6f30341ab013e23cc2641f144ab44ea1..324fe5c60f8781c952138d0a764a5c2223f6353b 100644 (file)
@@ -2115,7 +2115,7 @@ void __aa_labelset_update_subtree(struct aa_ns *ns)
        __labelset_update(ns);
 
        list_for_each_entry(child, &ns->sub_ns, base.list) {
-               mutex_lock(&child->lock);
+               mutex_lock_nested(&child->lock, child->level);
                __aa_labelset_update_subtree(child);
                mutex_unlock(&child->lock);
        }
index 08ca26bcca7703c7f74e1531879eea4dd3bf2ac9..4d5e98e49d5e06a9066f618adabe2767da45ff3c 100644 (file)
@@ -317,14 +317,11 @@ static u32 map_other(u32 x)
 void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
                      struct aa_perms *perms)
 {
-       perms->deny = 0;
-       perms->kill = perms->stop = 0;
-       perms->complain = perms->cond = 0;
-       perms->hide = 0;
-       perms->prompt = 0;
-       perms->allow = dfa_user_allow(dfa, state);
-       perms->audit = dfa_user_audit(dfa, state);
-       perms->quiet = dfa_user_quiet(dfa, state);
+       *perms = (struct aa_perms) {
+               .allow = dfa_user_allow(dfa, state),
+               .audit = dfa_user_audit(dfa, state),
+               .quiet = dfa_user_quiet(dfa, state),
+       };
 
        /* for v5 perm mapping in the policydb, the other set is used
         * to extend the general perm set
@@ -426,7 +423,6 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
                   void (*cb)(struct audit_buffer *, void *))
 {
        int type, error;
-       bool stop = false;
        u32 denied = request & (~perms->allow | perms->deny);
 
        if (likely(!denied)) {
@@ -447,8 +443,6 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
                else
                        type = AUDIT_APPARMOR_DENIED;
 
-               if (denied & perms->stop)
-                       stop = true;
                if (denied == (denied & perms->hide))
                        error = -ENOENT;
 
index 17893fde44873ade4c28a62b7a3729e73d7c161d..9a65eeaf7dfa22ab3b76d05b1445c56031195cd3 100644 (file)
@@ -846,7 +846,7 @@ module_param_call(audit, param_set_audit, param_get_audit,
 /* Determines if audit header is included in audited messages.  This
  * provides more context if the audit daemon is not running
  */
-bool aa_g_audit_header = 1;
+bool aa_g_audit_header = true;
 module_param_named(audit_header, aa_g_audit_header, aabool,
                   S_IRUSR | S_IWUSR);
 
@@ -871,7 +871,7 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
  * DEPRECATED: read only as strict checking of load is always done now
  * that non-root users (user namespaces) can load policy.
  */
-bool aa_g_paranoid_load = 1;
+bool aa_g_paranoid_load = true;
 module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
 
 /* Boot time disable flag */
@@ -1119,7 +1119,7 @@ static int __init apparmor_init(void)
 
        if (!apparmor_enabled || !security_module_enable("apparmor")) {
                aa_info_message("AppArmor disabled by boot time parameter");
-               apparmor_enabled = 0;
+               apparmor_enabled = false;
                return 0;
        }
 
@@ -1175,7 +1175,7 @@ alloc_out:
        aa_destroy_aafs();
        aa_teardown_dfa_engine();
 
-       apparmor_enabled = 0;
+       apparmor_enabled = false;
        return error;
 }
 
index 82a64b58041d2adc62debcf572b77b7a6607678d..ed9b4d0f9f7e212b161c1a312c52b56f5e0b0b49 100644 (file)
@@ -216,13 +216,12 @@ static unsigned int match_mnt_flags(struct aa_dfa *dfa, unsigned int state,
 static struct aa_perms compute_mnt_perms(struct aa_dfa *dfa,
                                           unsigned int state)
 {
-       struct aa_perms perms;
-
-       perms.kill = 0;
-       perms.allow = dfa_user_allow(dfa, state);
-       perms.audit = dfa_user_audit(dfa, state);
-       perms.quiet = dfa_user_quiet(dfa, state);
-       perms.xindex = dfa_user_xindex(dfa, state);
+       struct aa_perms perms = {
+               .allow = dfa_user_allow(dfa, state),
+               .audit = dfa_user_audit(dfa, state),
+               .quiet = dfa_user_quiet(dfa, state),
+               .xindex = dfa_user_xindex(dfa, state),
+       };
 
        return perms;
 }
index 4243b0c3f0e4acc6d66c70ea878f32d548bebdd4..b0b58848c2487e69cca16f9bfd3ee21d466a12af 100644 (file)
@@ -502,7 +502,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
 {
        struct aa_profile *p, *profile;
        const char *bname;
-       char *name;
+       char *name = NULL;
 
        AA_BUG(!parent);
 
@@ -545,7 +545,7 @@ name:
        profile->file.dfa = aa_get_dfa(nulldfa);
        profile->policy.dfa = aa_get_dfa(nulldfa);
 
-       mutex_lock(&profile->ns->lock);
+       mutex_lock_nested(&profile->ns->lock, profile->ns->level);
        p = __find_child(&parent->base.profiles, bname);
        if (p) {
                aa_free_profile(profile);
@@ -562,6 +562,7 @@ out:
        return profile;
 
 fail:
+       kfree(name);
        aa_free_profile(profile);
        return NULL;
 }
@@ -905,7 +906,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
        } else
                ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(label));
 
-       mutex_lock(&ns->lock);
+       mutex_lock_nested(&ns->lock, ns->level);
        /* check for duplicate rawdata blobs: space and file dedup */
        list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) {
                if (aa_rawdata_eq(rawdata_ent, udata)) {
@@ -1116,13 +1117,13 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
 
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
-               mutex_lock(&ns->parent->lock);
+               mutex_lock_nested(&ns->parent->lock, ns->level);
                __aa_remove_ns(ns);
                __aa_bump_ns_revision(ns);
                mutex_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
-               mutex_lock(&ns->lock);
+               mutex_lock_nested(&ns->lock, ns->level);
                profile = aa_get_profile(__lookup_profile(&ns->base, name));
                if (!profile) {
                        error = -ENOENT;
index 62a3589c62ab624156c0c01eaa8cadd72276866a..b1e629cba70b76f7b586ea07bfa0b77725c73e1d 100644 (file)
@@ -256,7 +256,8 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
        ns = alloc_ns(parent->base.hname, name);
        if (!ns)
                return NULL;
-       mutex_lock(&ns->lock);
+       ns->level = parent->level + 1;
+       mutex_lock_nested(&ns->lock, ns->level);
        error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
        if (error) {
                AA_ERROR("Failed to create interface for ns %s\n",
@@ -266,7 +267,6 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
                return ERR_PTR(error);
        }
        ns->parent = aa_get_ns(parent);
-       ns->level = parent->level + 1;
        list_add_rcu(&ns->base.list, &parent->sub_ns);
        /* add list ref */
        aa_get_ns(ns);
@@ -313,7 +313,7 @@ struct aa_ns *aa_prepare_ns(struct aa_ns *parent, const char *name)
 {
        struct aa_ns *ns;
 
-       mutex_lock(&parent->lock);
+       mutex_lock_nested(&parent->lock, parent->level);
        /* try and find the specified ns and if it doesn't exist create it */
        /* released by caller */
        ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
@@ -336,7 +336,7 @@ static void destroy_ns(struct aa_ns *ns)
        if (!ns)
                return;
 
-       mutex_lock(&ns->lock);
+       mutex_lock_nested(&ns->lock, ns->level);
        /* release all profiles in this namespace */
        __aa_profile_list_release(&ns->base.profiles);
 
index 4ede87c30f8b890a63e1aaae0a75a9ed6ba1045c..59a1a25b7d43f209b594d61c7fd38fb4e0e50f37 100644 (file)
@@ -157,7 +157,7 @@ static void do_loaddata_free(struct work_struct *work)
        struct aa_ns *ns = aa_get_ns(d->ns);
 
        if (ns) {
-               mutex_lock(&ns->lock);
+               mutex_lock_nested(&ns->lock, ns->level);
                __aa_fs_remove_rawdata(d);
                mutex_unlock(&ns->lock);
                aa_put_ns(ns);
index d8bc842594edd884b12412edf540f3fae5feb810..cf4d234febe94c9e96a8c0dc7df4d0132f856621 100644 (file)
@@ -47,7 +47,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
 /**
  * audit_resource - audit setting resource limit
  * @profile: profile being enforced  (NOT NULL)
- * @resoure: rlimit being auditing
+ * @resource: rlimit being audited
  * @value: value being set
  * @error: error value
  *
@@ -128,7 +128,7 @@ int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
                error = fn_for_each(label, profile,
                                audit_resource(profile, resource,
                                               new_rlim->rlim_max, peer,
-                                              "cap_sys_resoure", -EACCES));
+                                              "cap_sys_resource", -EACCES));
        else
                error = fn_for_each_confined(label, profile,
                                profile_setrlimit(profile, resource, new_rlim));
index ec7dfa02c0519483818c40cce40ba0993fbd4712..65fbcf3c32c735b9547a9f77b4a94bd7666d87fd 100644 (file)
@@ -320,6 +320,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
        if (iint->flags & IMA_DIGSIG)
                return;
 
+       if (iint->ima_file_status != INTEGRITY_PASS)
+               return;
+
        rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo);
        if (rc < 0)
                return;
index afb3a9175d7686e2997f253d1ecad486fcd2badb..7207e6094dc1622c9a51beedc92288cdec244de1 100644 (file)
@@ -29,10 +29,10 @@ DECLARE_WORK(key_gc_work, key_garbage_collector);
 /*
  * Reaper for links from keyrings to dead keys.
  */
-static void key_gc_timer_func(unsigned long);
+static void key_gc_timer_func(struct timer_list *);
 static DEFINE_TIMER(key_gc_timer, key_gc_timer_func);
 
-static time_t key_gc_next_run = LONG_MAX;
+static time64_t key_gc_next_run = TIME64_MAX;
 static struct key_type *key_gc_dead_keytype;
 
 static unsigned long key_gc_flags;
@@ -53,12 +53,12 @@ struct key_type key_type_dead = {
  * Schedule a garbage collection run.
  * - time precision isn't particularly important
  */
-void key_schedule_gc(time_t gc_at)
+void key_schedule_gc(time64_t gc_at)
 {
        unsigned long expires;
-       time_t now = current_kernel_time().tv_sec;
+       time64_t now = ktime_get_real_seconds();
 
-       kenter("%ld", gc_at - now);
+       kenter("%lld", gc_at - now);
 
        if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
                kdebug("IMMEDIATE");
@@ -84,10 +84,10 @@ void key_schedule_gc_links(void)
  * Some key's cleanup time was met after it expired, so we need to get the
  * reaper to go through a cycle finding expired keys.
  */
-static void key_gc_timer_func(unsigned long data)
+static void key_gc_timer_func(struct timer_list *unused)
 {
        kenter("");
-       key_gc_next_run = LONG_MAX;
+       key_gc_next_run = TIME64_MAX;
        key_schedule_gc_links();
 }
 
@@ -184,11 +184,11 @@ static void key_garbage_collector(struct work_struct *work)
 
        struct rb_node *cursor;
        struct key *key;
-       time_t new_timer, limit;
+       time64_t new_timer, limit;
 
        kenter("[%lx,%x]", key_gc_flags, gc_state);
 
-       limit = current_kernel_time().tv_sec;
+       limit = ktime_get_real_seconds();
        if (limit > key_gc_delay)
                limit -= key_gc_delay;
        else
@@ -204,7 +204,7 @@ static void key_garbage_collector(struct work_struct *work)
                gc_state |= KEY_GC_REAPING_DEAD_1;
        kdebug("new pass %x", gc_state);
 
-       new_timer = LONG_MAX;
+       new_timer = TIME64_MAX;
 
        /* As only this function is permitted to remove things from the key
         * serial tree, if cursor is non-NULL then it will always point to a
@@ -235,7 +235,7 @@ continue_scanning:
 
                if (gc_state & KEY_GC_SET_TIMER) {
                        if (key->expiry > limit && key->expiry < new_timer) {
-                               kdebug("will expire %x in %ld",
+                               kdebug("will expire %x in %lld",
                                       key_serial(key), key->expiry - limit);
                                new_timer = key->expiry;
                        }
@@ -276,7 +276,7 @@ maybe_resched:
         */
        kdebug("pass complete");
 
-       if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
+       if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
                new_timer += key_gc_delay;
                key_schedule_gc(new_timer);
        }
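
The key garbage-collector changes above are one instance of a single conversion: time_t and current_kernel_time().tv_sec are 32 bits wide on 32-bit architectures and overflow in 2038, so expiry bookkeeping moves to time64_t and ktime_get_real_seconds(), which returns seconds since the epoch as a 64-bit value everywhere. A condensed sketch of the resulting expiry check, assuming a hypothetical record with an expiry field:

#include <linux/time64.h>
#include <linux/timekeeping.h>
#include <linux/types.h>

struct record {
        time64_t expiry;        /* was time_t: only 32 bits on 32-bit arches */
};

static bool record_expired(const struct record *r)
{
        /* Old pattern: time_t now = current_kernel_time().tv_sec; */
        time64_t now = ktime_get_real_seconds();

        return r->expiry && now >= r->expiry;
}
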
index 503adbae7b0dd0b096aa7fd672fddb0d3513d115..9f8208dc0e55829c7e5821b3f123148e5f7dc801 100644 (file)
@@ -130,7 +130,7 @@ struct keyring_search_context {
        int                     skipped_ret;
        bool                    possessed;
        key_ref_t               result;
-       struct timespec         now;
+       time64_t                now;
 };
 
 extern bool key_default_cmp(const struct key *key,
@@ -169,10 +169,10 @@ extern void key_change_session_keyring(struct callback_head *twork);
 
 extern struct work_struct key_gc_work;
 extern unsigned key_gc_delay;
-extern void keyring_gc(struct key *keyring, time_t limit);
+extern void keyring_gc(struct key *keyring, time64_t limit);
 extern void keyring_restriction_gc(struct key *keyring,
                                   struct key_type *dead_type);
-extern void key_schedule_gc(time_t gc_at);
+extern void key_schedule_gc(time64_t gc_at);
 extern void key_schedule_gc_links(void);
 extern void key_gc_keytype(struct key_type *ktype);
 
@@ -211,7 +211,7 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
 /*
  * Determine whether a key is dead.
  */
-static inline bool key_is_dead(const struct key *key, time_t limit)
+static inline bool key_is_dead(const struct key *key, time64_t limit)
 {
        return
                key->flags & ((1 << KEY_FLAG_DEAD) |
index 83bf4b4afd49d24ba80209a9bff71a558bd01b6c..66049183ad8961554cbf0a4ea0ccf403bd0b7579 100644 (file)
@@ -460,7 +460,7 @@ static int __key_instantiate_and_link(struct key *key,
                        if (authkey)
                                key_revoke(authkey);
 
-                       if (prep->expiry != TIME_T_MAX) {
+                       if (prep->expiry != TIME64_MAX) {
                                key->expiry = prep->expiry;
                                key_schedule_gc(prep->expiry + key_gc_delay);
                        }
@@ -506,7 +506,7 @@ int key_instantiate_and_link(struct key *key,
        prep.data = data;
        prep.datalen = datalen;
        prep.quotalen = key->type->def_datalen;
-       prep.expiry = TIME_T_MAX;
+       prep.expiry = TIME64_MAX;
        if (key->type->preparse) {
                ret = key->type->preparse(&prep);
                if (ret < 0)
@@ -570,7 +570,6 @@ int key_reject_and_link(struct key *key,
                        struct key *authkey)
 {
        struct assoc_array_edit *edit;
-       struct timespec now;
        int ret, awaken, link_ret = 0;
 
        key_check(key);
@@ -593,8 +592,7 @@ int key_reject_and_link(struct key *key,
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
                mark_key_instantiated(key, -error);
-               now = current_kernel_time();
-               key->expiry = now.tv_sec + timeout;
+               key->expiry = ktime_get_real_seconds() + timeout;
                key_schedule_gc(key->expiry + key_gc_delay);
 
                if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
@@ -710,16 +708,13 @@ found_kernel_type:
 
 void key_set_timeout(struct key *key, unsigned timeout)
 {
-       struct timespec now;
-       time_t expiry = 0;
+       time64_t expiry = 0;
 
        /* make the changes with the locks held to prevent races */
        down_write(&key->sem);
 
-       if (timeout > 0) {
-               now = current_kernel_time();
-               expiry = now.tv_sec + timeout;
-       }
+       if (timeout > 0)
+               expiry = ktime_get_real_seconds() + timeout;
 
        key->expiry = expiry;
        key_schedule_gc(key->expiry + key_gc_delay);
@@ -850,7 +845,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
        prep.data = payload;
        prep.datalen = plen;
        prep.quotalen = index_key.type->def_datalen;
-       prep.expiry = TIME_T_MAX;
+       prep.expiry = TIME64_MAX;
        if (index_key.type->preparse) {
                ret = index_key.type->preparse(&prep);
                if (ret < 0) {
@@ -994,7 +989,7 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
        prep.data = payload;
        prep.datalen = plen;
        prep.quotalen = key->type->def_datalen;
-       prep.expiry = TIME_T_MAX;
+       prep.expiry = TIME64_MAX;
        if (key->type->preparse) {
                ret = key->type->preparse(&prep);
                if (ret < 0)
@@ -1028,8 +1023,7 @@ EXPORT_SYMBOL(key_update);
  */
 void key_revoke(struct key *key)
 {
-       struct timespec now;
-       time_t time;
+       time64_t time;
 
        key_check(key);
 
@@ -1044,8 +1038,7 @@ void key_revoke(struct key *key)
                key->type->revoke(key);
 
        /* set the death time to no more than the expiry time */
-       now = current_kernel_time();
-       time = now.tv_sec;
+       time = ktime_get_real_seconds();
        if (key->revoked_at == 0 || key->revoked_at > time) {
                key->revoked_at = time;
                key_schedule_gc(key->revoked_at + key_gc_delay);
index 36f842ec87f04580dfd051f4e50d395242a69eb6..d0bccebbd3b51cedb842ce574f65d448856650c0 100644 (file)
@@ -565,7 +565,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
 
        /* skip invalidated, revoked and expired keys */
        if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
-               time_t expiry = READ_ONCE(key->expiry);
+               time64_t expiry = READ_ONCE(key->expiry);
 
                if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
                              (1 << KEY_FLAG_REVOKED))) {
@@ -574,7 +574,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
                        goto skipped;
                }
 
-               if (expiry && ctx->now.tv_sec >= expiry) {
+               if (expiry && ctx->now >= expiry) {
                        if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
                                ctx->result = ERR_PTR(-EKEYEXPIRED);
                        kleave(" = %d [expire]", ctx->skipped_ret);
@@ -834,10 +834,10 @@ found:
        key = key_ref_to_ptr(ctx->result);
        key_check(key);
        if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
-               key->last_used_at = ctx->now.tv_sec;
-               keyring->last_used_at = ctx->now.tv_sec;
+               key->last_used_at = ctx->now;
+               keyring->last_used_at = ctx->now;
                while (sp > 0)
-                       stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
+                       stack[--sp].keyring->last_used_at = ctx->now;
        }
        kleave(" = true");
        return true;
@@ -898,7 +898,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
        }
 
        rcu_read_lock();
-       ctx->now = current_kernel_time();
+       ctx->now = ktime_get_real_seconds();
        if (search_nested_keyrings(keyring, ctx))
                __key_get(key_ref_to_ptr(ctx->result));
        rcu_read_unlock();
@@ -1149,7 +1149,7 @@ struct key *find_keyring_by_name(const char *name, bool uid_keyring)
                         * (ie. it has a zero usage count) */
                        if (!refcount_inc_not_zero(&keyring->usage))
                                continue;
-                       keyring->last_used_at = current_kernel_time().tv_sec;
+                       keyring->last_used_at = ktime_get_real_seconds();
                        goto out;
                }
        }
@@ -1489,7 +1489,7 @@ static void keyring_revoke(struct key *keyring)
 static bool keyring_gc_select_iterator(void *object, void *iterator_data)
 {
        struct key *key = keyring_ptr_to_key(object);
-       time_t *limit = iterator_data;
+       time64_t *limit = iterator_data;
 
        if (key_is_dead(key, *limit))
                return false;
@@ -1500,7 +1500,7 @@ static bool keyring_gc_select_iterator(void *object, void *iterator_data)
 static int keyring_gc_check_iterator(const void *object, void *iterator_data)
 {
        const struct key *key = keyring_ptr_to_key(object);
-       time_t *limit = iterator_data;
+       time64_t *limit = iterator_data;
 
        key_check(key);
        return key_is_dead(key, *limit);
@@ -1512,7 +1512,7 @@ static int keyring_gc_check_iterator(const void *object, void *iterator_data)
  * Not called with any locks held.  The keyring's key struct will not be
  * deallocated under us as only our caller may deallocate it.
  */
-void keyring_gc(struct key *keyring, time_t limit)
+void keyring_gc(struct key *keyring, time64_t limit)
 {
        int result;
 
index a72b4dd70c8abfab5d05503415c4a5fcef7b6df3..f68dc04d614e24badb3ca1058fcb023c2a54df94 100644 (file)
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(key_task_permission);
 int key_validate(const struct key *key)
 {
        unsigned long flags = READ_ONCE(key->flags);
-       time_t expiry = READ_ONCE(key->expiry);
+       time64_t expiry = READ_ONCE(key->expiry);
 
        if (flags & (1 << KEY_FLAG_INVALIDATED))
                return -ENOKEY;
@@ -101,8 +101,7 @@ int key_validate(const struct key *key)
 
        /* check it hasn't expired */
        if (expiry) {
-               struct timespec now = current_kernel_time();
-               if (now.tv_sec >= expiry)
+               if (ktime_get_real_seconds() >= expiry)
                        return -EKEYEXPIRED;
        }
 
index 6d1fcbba1e0961927c9b3a6027fcd8084a21f80a..fbc4af5c6c9ffaf273dc6c78d2ab8e644c27f8d3 100644 (file)
@@ -178,13 +178,12 @@ static int proc_keys_show(struct seq_file *m, void *v)
 {
        struct rb_node *_p = v;
        struct key *key = rb_entry(_p, struct key, serial_node);
-       struct timespec now;
-       time_t expiry;
-       unsigned long timo;
        unsigned long flags;
        key_ref_t key_ref, skey_ref;
+       time64_t now, expiry;
        char xbuf[16];
        short state;
+       u64 timo;
        int rc;
 
        struct keyring_search_context ctx = {
@@ -215,7 +214,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
        if (rc < 0)
                return 0;
 
-       now = current_kernel_time();
+       now = ktime_get_real_seconds();
 
        rcu_read_lock();
 
@@ -223,21 +222,21 @@ static int proc_keys_show(struct seq_file *m, void *v)
        expiry = READ_ONCE(key->expiry);
        if (expiry == 0) {
                memcpy(xbuf, "perm", 5);
-       } else if (now.tv_sec >= expiry) {
+       } else if (now >= expiry) {
                memcpy(xbuf, "expd", 5);
        } else {
-               timo = expiry - now.tv_sec;
+               timo = expiry - now;
 
                if (timo < 60)
-                       sprintf(xbuf, "%lus", timo);
+                       sprintf(xbuf, "%llus", timo);
                else if (timo < 60*60)
-                       sprintf(xbuf, "%lum", timo / 60);
+                       sprintf(xbuf, "%llum", div_u64(timo, 60));
                else if (timo < 60*60*24)
-                       sprintf(xbuf, "%luh", timo / (60*60));
+                       sprintf(xbuf, "%lluh", div_u64(timo, 60 * 60));
                else if (timo < 60*60*24*7)
-                       sprintf(xbuf, "%lud", timo / (60*60*24));
+                       sprintf(xbuf, "%llud", div_u64(timo, 60 * 60 * 24));
                else
-                       sprintf(xbuf, "%luw", timo / (60*60*24*7));
+                       sprintf(xbuf, "%lluw", div_u64(timo, 60 * 60 * 24 * 7));
        }
 
        state = key_read_state(key);
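
The proc formatting change above is more than a type widening: once timo is a u64, a plain '/' would make the compiler emit a call to a 64-bit division helper that the kernel does not provide on 32-bit architectures, so the divisions go through div_u64(), which divides a u64 by a u32. A small sketch of the same bucketing, assuming a seconds-remaining value:

#include <linux/kernel.h>
#include <linux/math64.h>

/* Format a remaining lifetime in seconds into a short human-readable unit. */
static void format_timeout(char *buf, size_t len, u64 timo)
{
        if (timo < 60)
                snprintf(buf, len, "%llus", timo);
        else if (timo < 60 * 60)
                snprintf(buf, len, "%llum", div_u64(timo, 60));
        else if (timo < 60 * 60 * 24)
                snprintf(buf, len, "%lluh", div_u64(timo, 60 * 60));
        else
                snprintf(buf, len, "%llud", div_u64(timo, 60 * 60 * 24));
}
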
index 740affd65ee98464e19fc32f830f86a1ad3f24f4..d5b25e535d3a5ad2ef567015bc4915d06b9f3f5e 100644 (file)
@@ -738,7 +738,7 @@ try_again:
        if (ret < 0)
                goto invalid_key;
 
-       key->last_used_at = current_kernel_time().tv_sec;
+       key->last_used_at = ktime_get_real_seconds();
 
 error:
        put_cred(ctx.cred);
index a93a4235a33287864bb5a5047b571355771cd966..10e7ef7a8804b1ba0d88b6a0c4ad478f38a9397e 100644 (file)
@@ -248,8 +248,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
                                runtime->rate);
                *audio_tstamp = ns_to_timespec(audio_nsecs);
        }
-       runtime->status->audio_tstamp = *audio_tstamp;
-       runtime->status->tstamp = *curr_tstamp;
+       if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
+               runtime->status->audio_tstamp = *audio_tstamp;
+               runtime->status->tstamp = *curr_tstamp;
+       }
 
        /*
         * re-take a driver timestamp to let apps detect if the reference tstamp
index 59127b6ef39ee97b1abf66767b44a6933fa367e4..e00f7e399e462a1926c7f94432fe204c0823b223 100644 (file)
@@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file,
        struct snd_timer *t;
 
        tu = file->private_data;
-       if (snd_BUG_ON(!tu->timeri))
-               return -ENXIO;
+       if (!tu->timeri)
+               return -EBADFD;
        t = tu->timeri->timer;
-       if (snd_BUG_ON(!t))
-               return -ENXIO;
+       if (!t)
+               return -EBADFD;
        memset(&info, 0, sizeof(info));
        info.card = t->card ? t->card->number : -1;
        if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
@@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file,
        struct snd_timer_status32 status;
        
        tu = file->private_data;
-       if (snd_BUG_ON(!tu->timeri))
-               return -ENXIO;
+       if (!tu->timeri)
+               return -EBADFD;
        memset(&status, 0, sizeof(status));
        status.tstamp.tv_sec = tu->tstamp.tv_sec;
        status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
index e43af18d43836367e263356eb2377cc4e08e8368..8632301489fa66e9973b40330ac28a75026c20af 100644 (file)
@@ -495,7 +495,9 @@ EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
  * Returns 0 if successful, or a negative error code.
  */
 int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
-                                int (*func)(struct snd_kcontrol *, void *),
+                                int (*func)(struct snd_kcontrol *vslave,
+                                            struct snd_kcontrol *slave,
+                                            void *arg),
                                 void *arg)
 {
        struct link_master *master;
@@ -507,7 +509,7 @@ int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
        if (err < 0)
                return err;
        list_for_each_entry(slave, &master->slaves, list) {
-               err = func(&slave->slave, arg);
+               err = func(slave->kctl, &slave->slave, arg);
                if (err < 0)
                        return err;
        }
index 81acc20c2535870d7154e192fb5829d005540fac..f21633cd9b38ea9a8f6a3c955f9209a8f66ecf2f 100644 (file)
@@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
        memset(pcm_chmap, 0, sizeof(pcm_chmap));
        chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
 
-       for (i = 0; i < sizeof(chmap); i++)
+       for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
                ucontrol->value.integer.value[i] = pcm_chmap[i];
 
        return 0;
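
The hdmi_chmap fix above is the classic sizeof-versus-element-count slip: chmap is a pointer, so sizeof(chmap) is the pointer size (4 or 8), not the number of slots in pcm_chmap, and the copy loop covered only part of the array. ARRAY_SIZE(), taken on the array object itself, gives the element count. A standalone illustration of the difference:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
        unsigned char pcm_chmap[16] = { 0 };
        unsigned char *chmap = pcm_chmap;       /* array decays to a pointer */

        printf("sizeof(chmap)         = %zu\n", sizeof(chmap));         /* 8 on LP64 */
        printf("ARRAY_SIZE(pcm_chmap) = %zu\n", ARRAY_SIZE(pcm_chmap)); /* 16 */
        return 0;
}
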
index c1f8e5479bf31b204e2224a8d3b1dcece12e365b..e018ecbf78a8f88e801990f1603218faaebba5f4 100644 (file)
@@ -1823,7 +1823,9 @@ struct slave_init_arg {
 };
 
 /* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
-static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
+static int init_slave_0dB(struct snd_kcontrol *slave,
+                         struct snd_kcontrol *kctl,
+                         void *_arg)
 {
        struct slave_init_arg *arg = _arg;
        int _tlv[4];
@@ -1860,7 +1862,7 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
        arg->step = step;
        val = -tlv[2] / step;
        if (val > 0) {
-               put_kctl_with_value(kctl, val);
+               put_kctl_with_value(slave, val);
                return val;
        }
 
@@ -1868,7 +1870,9 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
 }
 
 /* unmute the slave via snd_ctl_apply_vmaster_slaves() */
-static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
+static int init_slave_unmute(struct snd_kcontrol *slave,
+                            struct snd_kcontrol *kctl,
+                            void *_arg)
 {
        return put_kctl_with_value(slave, 1);
 }
index f958d8d54d159ccdc957d13cacbfb92973e15db4..c71dcacea807bf0e0d11aa147401acd12280686c 100644 (file)
@@ -2463,6 +2463,9 @@ static const struct pci_device_id azx_ids[] = {
        /* AMD Hudson */
        { PCI_DEVICE(0x1022, 0x780d),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+       /* AMD Raven */
+       { PCI_DEVICE(0x1022, 0x15e3),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x0002),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
index db1a376e27c016d1f36fff94dd695895a643afa5..921a10eff43a36ad76bb1631d6de41f52df32c17 100644 (file)
@@ -341,6 +341,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0299:
                alc_update_coef_idx(codec, 0x10, 1<<9, 0);
                break;
+       case 0x10ec0275:
+               alc_update_coef_idx(codec, 0xe, 0, 1<<0);
+               break;
        case 0x10ec0293:
                alc_update_coef_idx(codec, 0xa, 1<<13, 0);
                break;
@@ -6452,6 +6455,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                ALC225_STANDARD_PINS,
                {0x12, 0xb7a60130},
                {0x1b, 0x90170110}),
+       SND_HDA_PIN_QUIRK(0x10ec0233, 0x8086, "Intel NUC Skull Canyon", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x1b, 0x01111010},
+               {0x1e, 0x01451130},
+               {0x21, 0x02211020}),
        SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60140},
                {0x14, 0x90170110},
@@ -6887,7 +6894,7 @@ static int patch_alc269(struct hda_codec *codec)
        case 0x10ec0703:
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
-               alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+               alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
                break;
 
        }
index bb8be10b843701f3ad8ea5bf22b194b40dbe24c8..7b49d04e3c6002ccffeefaacb33a1c17d83efadd 100644 (file)
@@ -34,6 +34,11 @@ config SND_SOC_INTEL_SST_TOPLEVEL
        depends on X86 || COMPILE_TEST
        select SND_SOC_INTEL_MACH
        select SND_SOC_INTEL_COMMON
+       help
+          Intel ASoC Audio Drivers. If you have an Intel machine that
+          has an audio controller with a DSP and I2S or DMIC port, then
+          enable this option by saying Y or M.
+          If unsure, select "N".
 
 config SND_SOC_INTEL_HASWELL
        tristate "Intel ASoC SST driver for Haswell/Broadwell"
index 26dd5f20f1494320735b47f60be79eab70b6e111..eb3396ffba4c4cee0bc986e0e6d2ea99ab9a45bc 100644 (file)
@@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
        while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
                                             ctrl_iface->extralen,
                                             cs, UAC2_CLOCK_SOURCE))) {
-               if (cs->bClockID == clock_id)
+               if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
                        return cs;
        }
 
@@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
        while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
                                             ctrl_iface->extralen,
                                             cs, UAC2_CLOCK_SELECTOR))) {
-               if (cs->bClockID == clock_id)
+               if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
+                       if (cs->bLength < 5 + cs->bNrInPins)
+                               return NULL;
                        return cs;
+               }
        }
 
        return NULL;
@@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
        while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
                                             ctrl_iface->extralen,
                                             cs, UAC2_CLOCK_MULTIPLIER))) {
-               if (cs->bClockID == clock_id)
+               if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
                        return cs;
        }
 
index 4f9613e5fc9ec2a967456d5d84e67bdc5347f239..c1376bfdc90b2add14a7f9e2804b4e6ac6c9e534 100644 (file)
@@ -201,7 +201,7 @@ static int line6_send_raw_message_async_part(struct message *msg,
 void line6_start_timer(struct timer_list *timer, unsigned long msecs,
                       void (*function)(struct timer_list *t))
 {
-       timer->function = (TIMER_FUNC_TYPE)function;
+       timer->function = function;
        mod_timer(timer, jiffies + msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL_GPL(line6_start_timer);
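
The cast removal above comes from the 4.15 timer API conversion: callbacks now receive a struct timer_list * rather than an unsigned long cookie, timers are set up with timer_setup(), and the enclosing object is recovered with from_timer() instead of being passed through timer->data. A short sketch of the new-style pattern, using a made-up line6_device container for illustration:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct line6_device {                   /* hypothetical container */
        struct timer_list timer;
        int pending;
};

static void line6_timeout(struct timer_list *t)
{
        /* Recover the structure that embeds the timer. */
        struct line6_device *dev = from_timer(dev, t, timer);

        dev->pending = 0;
}

static void line6_arm(struct line6_device *dev, unsigned long msecs)
{
        timer_setup(&dev->timer, line6_timeout, 0);
        mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msecs));
}
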
index 91bc8f18791e46bc80adfb8503e310e2fdc7a298..61b348383de88fa282028a78603567fa513589b7 100644 (file)
@@ -1469,6 +1469,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
        __u8 *bmaControls;
 
        if (state->mixer->protocol == UAC_VERSION_1) {
+               if (hdr->bLength < 7) {
+                       usb_audio_err(state->chip,
+                                     "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+                                     unitid);
+                       return -EINVAL;
+               }
                csize = hdr->bControlSize;
                if (!csize) {
                        usb_audio_dbg(state->chip,
@@ -1486,6 +1492,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
                }
        } else {
                struct uac2_feature_unit_descriptor *ftr = _ftr;
+               if (hdr->bLength < 6) {
+                       usb_audio_err(state->chip,
+                                     "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+                                     unitid);
+                       return -EINVAL;
+               }
                csize = 4;
                channels = (hdr->bLength - 6) / 4 - 1;
                bmaControls = ftr->bmaControls;
@@ -2086,7 +2098,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
        const struct usbmix_name_map *map;
        char **namelist;
 
-       if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
+       if (desc->bLength < 5 || !desc->bNrInPins ||
+           desc->bLength < 5 + desc->bNrInPins) {
                usb_audio_err(state->chip,
                        "invalid SELECTOR UNIT descriptor %d\n", unitid);
                return -EINVAL;
@@ -2330,9 +2343,14 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
 {
        struct usb_mixer_elem_list *list;
 
-       for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem)
+       for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) {
+               struct usb_mixer_elem_info *info =
+                       (struct usb_mixer_elem_info *)list;
+               /* invalidate cache, so the value is read from the device */
+               info->cached = 0;
                snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
                               &list->kctl->id);
+       }
 }
 
 static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
index c03b4f69d5b737defdff632340c4b6095cb0ce57..be02c8b904dba1f3e9135d22d7393895e853f9d3 100644 (file)
@@ -30,6 +30,7 @@ help:
        @echo '  usb                    - USB testing tools'
        @echo '  virtio                 - vhost test module'
        @echo '  vm                     - misc vm tools'
+       @echo '  wmi                    - WMI interface examples'
        @echo '  x86_energy_perf_policy - Intel energy policy tool'
        @echo ''
        @echo 'You can do:'
@@ -58,7 +59,7 @@ acpi: FORCE
 cpupower: FORCE
        $(call descend,power/$@)
 
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds: FORCE
+cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi: FORCE
        $(call descend,$@)
 
 liblockdep: FORCE
@@ -93,7 +94,7 @@ kvm_stat: FORCE
 all: acpi cgroup cpupower gpio hv firewire liblockdep \
                perf selftests spi turbostat usb \
                virtio vm bpf x86_energy_perf_policy \
-               tmon freefall iio objtool kvm_stat
+               tmon freefall iio objtool kvm_stat wmi
 
 acpi_install:
        $(call descend,power/$(@:_install=),install)
@@ -101,7 +102,7 @@ acpi_install:
 cpupower_install:
        $(call descend,power/$(@:_install=),install)
 
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install:
        $(call descend,$(@:_install=),install)
 
 liblockdep_install:
@@ -126,7 +127,8 @@ install: acpi_install cgroup_install cpupower_install gpio_install \
                hv_install firewire_install iio_install liblockdep_install \
                perf_install selftests_install turbostat_install usb_install \
                virtio_install vm_install bpf_install x86_energy_perf_policy_install \
-               tmon_install freefall_install objtool_install kvm_stat_install
+               tmon_install freefall_install objtool_install kvm_stat_install \
+               wmi_install
 
 acpi_clean:
        $(call descend,power/acpi,clean)
@@ -134,7 +136,7 @@ acpi_clean:
 cpupower_clean:
        $(call descend,power/cpupower,clean)
 
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean:
+cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean:
        $(call descend,$(@:_clean=),clean)
 
 liblockdep_clean:
@@ -172,6 +174,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
                perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
                vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
                freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
-               gpio_clean objtool_clean leds_clean
+               gpio_clean objtool_clean leds_clean wmi_clean
 
 .PHONY: FORCE
index f45c44ef9beca35139e1c8262ef88fe7396cd7dc..ad619b96c27664300df9a78bc28958c4d8662a4b 100644 (file)
@@ -41,7 +41,6 @@
 #include <string.h>
 #include <time.h>
 #include <unistd.h>
-#include <net/if.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 
@@ -230,21 +229,6 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
                     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
                     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 
-       if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
-               jsonw_name(json_wtr, "dev");
-               if (info->ifindex) {
-                       char name[IF_NAMESIZE];
-
-                       if (!if_indextoname(info->ifindex, name))
-                               jsonw_printf(json_wtr, "\"ifindex:%d\"",
-                                            info->ifindex);
-                       else
-                               jsonw_printf(json_wtr, "\"%s\"", name);
-               } else {
-                       jsonw_printf(json_wtr, "\"unknown\"");
-               }
-       }
-
        if (info->load_time) {
                char buf[32];
 
@@ -302,21 +286,6 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
 
        printf("tag ");
        fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
-       printf(" ");
-
-       if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
-               printf("dev ");
-               if (info->ifindex) {
-                       char name[IF_NAMESIZE];
-
-                       if (!if_indextoname(info->ifindex, name))
-                               printf("ifindex:%d ", info->ifindex);
-                       else
-                               printf("%s ", name);
-               } else {
-                       printf("unknown ");
-               }
-       }
        printf("\n");
 
        if (info->load_time) {
index e880ae6434eed9eb29db99169c716c94c7cf30aa..4c223ab30293cd1e07248b960e213c7f22b778c5 100644 (file)
@@ -262,7 +262,7 @@ union bpf_attr {
                __u32           kern_version;   /* checked when prog_type=kprobe */
                __u32           prog_flags;
                char            prog_name[BPF_OBJ_NAME_LEN];
-               __u32           prog_target_ifindex;    /* ifindex of netdev to prep for */
+               __u32           prog_ifindex;   /* ifindex of netdev to prep for */
        };
 
        struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -897,10 +897,6 @@ enum sk_action {
 
 #define BPF_TAG_SIZE   8
 
-enum bpf_prog_status {
-       BPF_PROG_STATUS_DEV_BOUND       = (1 << 0),
-};
-
 struct bpf_prog_info {
        __u32 type;
        __u32 id;
@@ -914,8 +910,6 @@ struct bpf_prog_info {
        __u32 nr_map_ids;
        __aligned_u64 map_ids;
        char name[BPF_OBJ_NAME_LEN];
-       __u32 ifindex;
-       __u32 status;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
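
This UAPI update renames prog_target_ifindex to prog_ifindex and removes the ifindex/status reporting fields from bpf_prog_info, matching the bpftool hunks above that stop printing them. A minimal sketch of requesting device offload through the renamed field with a raw bpf(2) call follows; the XDP program type, the device-name handling and the helper name load_offloaded are for illustration only.

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static __u64 ptr_to_u64(const void *p)
{
        return (__u64)(unsigned long)p;
}

/* Load an XDP program and ask the kernel to bind it to one netdev. */
static int load_offloaded(const struct bpf_insn *insns, unsigned int insn_cnt,
                          const char *dev)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type    = BPF_PROG_TYPE_XDP;
        attr.insns        = ptr_to_u64(insns);
        attr.insn_cnt     = insn_cnt;
        attr.license      = ptr_to_u64("GPL");
        attr.prog_ifindex = if_nametoindex(dev);  /* was prog_target_ifindex */

        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}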
index 7c214ceb93860abea126f086da2e4e823417fc01..315df0a70265291f2752ab5248e36b32a3e956b7 100644 (file)
@@ -436,13 +436,13 @@ create_arg_exp(enum filter_exp_type etype)
                return NULL;
 
        arg->type = FILTER_ARG_EXP;
-       arg->op.type = etype;
+       arg->exp.type = etype;
 
        return arg;
 }
 
 static struct filter_arg *
-create_arg_cmp(enum filter_exp_type etype)
+create_arg_cmp(enum filter_cmp_type ctype)
 {
        struct filter_arg *arg;
 
@@ -452,7 +452,7 @@ create_arg_cmp(enum filter_exp_type etype)
 
        /* Use NUM and change if necessary */
        arg->type = FILTER_ARG_NUM;
-       arg->op.type = etype;
+       arg->num.type = ctype;
 
        return arg;
 }
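
Both fixes write the argument's type through the union member that matches how the argument is later read (exp for FILTER_ARG_EXP, num for FILTER_ARG_NUM) instead of op. Writing through the wrong member only works by accident when the union variants happen to share a layout; the small self-contained illustration below uses made-up types whose variants deliberately differ, so the mismatch is visible.

#include <stdio.h>

enum arg_kind { ARG_EXP, ARG_NUM };

struct arg {
        enum arg_kind kind;
        union {
                struct { int type; int lhs; } exp;
                struct { int lhs; int type; } num;  /* different layout on purpose */
        };
};

int main(void)
{
        struct arg a = { .kind = ARG_NUM };

        a.exp.type = 42;                         /* wrong member for ARG_NUM */
        printf("num.type = %d\n", a.num.type);   /* prints 0, not 42 */

        a.num.type = 42;                         /* correct member */
        printf("num.type = %d\n", a.num.type);   /* prints 42 */
        return 0;
}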
index d3102c865a95e0ea82c6979c9acb72c50a61fce1..914cff12899b655f760b0c5d79d3e029a3d09168 100644 (file)
@@ -1,3 +1,3 @@
-arch/x86/insn/inat-tables.c
+arch/x86/lib/inat-tables.c
 objtool
 fixdep
index 424b1965d06f2f95d701d19284cc0f3f7e6e82ea..0f94af3ccaaa25a20325db643d4d79cbaff17688 100644 (file)
@@ -25,7 +25,9 @@ OBJTOOL_IN := $(OBJTOOL)-in.o
 
 all: $(OBJTOOL)
 
-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
+INCLUDES := -I$(srctree)/tools/include \
+           -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+           -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
 CFLAGS   += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS  += -lelf $(LIBSUBCMD)
@@ -41,22 +43,8 @@ include $(srctree)/tools/build/Makefile.include
 $(OBJTOOL_IN): fixdep FORCE
        @$(MAKE) $(build)=objtool
 
-# Busybox's diff doesn't have -I, avoid warning in that case
-#
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-       @(diff -I 2>&1 | grep -q 'option requires an argument' && \
-       test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
-       diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
-       diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
-       diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
-       diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
-       diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
-       diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
-       diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
-       || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
-       @(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
-       diff ../../arch/x86/include/asm/orc_types.h orc_types.h >/dev/null) \
-       || echo "warning: objtool: orc_types.h differs from kernel" >&2 )) || true
+       @./sync-check.sh
        $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
 
 
@@ -66,7 +54,7 @@ $(LIBSUBCMD): fixdep FORCE
 clean:
        $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
        $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
-       $(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
+       $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
 
 FORCE:
 
index debbdb0b5c430b3a74143bd216db464956522733..b998412c017d9173d27b10a760899fd0c9efa32f 100644 (file)
@@ -1,12 +1,12 @@
 objtool-y += decode.o
 
-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
+inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
+inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
 
-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
        $(call rule_mkdir)
        $(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
 
-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
+$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
 
-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
+CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
index 34a579f806e390337bdee738ae507364c02e7ad7..8acfc47af70efde4c1a3bb3ad6aff809f0ad0308 100644 (file)
@@ -19,9 +19,9 @@
 #include <stdlib.h>
 
 #define unlikely(cond) (cond)
-#include "insn/insn.h"
-#include "insn/inat.c"
-#include "insn/insn.c"
+#include <asm/insn.h>
+#include "lib/inat.c"
+#include "lib/insn.c"
 
 #include "../../elf.h"
 #include "../../arch.h"
diff --git a/tools/objtool/arch/x86/include/asm/inat.h b/tools/objtool/arch/x86/include/asm/inat.h
new file mode 100644 (file)
index 0000000..1c78580
--- /dev/null
@@ -0,0 +1,244 @@
+#ifndef _ASM_X86_INAT_H
+#define _ASM_X86_INAT_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <asm/inat_types.h>
+
+/*
+ * Internal bits. Don't use bitmasks directly, because these bits are
+ * unstable. You should use checking functions.
+ */
+
+#define INAT_OPCODE_TABLE_SIZE 256
+#define INAT_GROUP_TABLE_SIZE 8
+
+/* Legacy last prefixes */
+#define INAT_PFX_OPNDSZ        1       /* 0x66 */ /* LPFX1 */
+#define INAT_PFX_REPE  2       /* 0xF3 */ /* LPFX2 */
+#define INAT_PFX_REPNE 3       /* 0xF2 */ /* LPFX3 */
+/* Other Legacy prefixes */
+#define INAT_PFX_LOCK  4       /* 0xF0 */
+#define INAT_PFX_CS    5       /* 0x2E */
+#define INAT_PFX_DS    6       /* 0x3E */
+#define INAT_PFX_ES    7       /* 0x26 */
+#define INAT_PFX_FS    8       /* 0x64 */
+#define INAT_PFX_GS    9       /* 0x65 */
+#define INAT_PFX_SS    10      /* 0x36 */
+#define INAT_PFX_ADDRSZ        11      /* 0x67 */
+/* x86-64 REX prefix */
+#define INAT_PFX_REX   12      /* 0x4X */
+/* AVX VEX prefixes */
+#define INAT_PFX_VEX2  13      /* 2-bytes VEX prefix */
+#define INAT_PFX_VEX3  14      /* 3-bytes VEX prefix */
+#define INAT_PFX_EVEX  15      /* EVEX prefix */
+
+#define INAT_LSTPFX_MAX        3
+#define INAT_LGCPFX_MAX        11
+
+/* Immediate size */
+#define INAT_IMM_BYTE          1
+#define INAT_IMM_WORD          2
+#define INAT_IMM_DWORD         3
+#define INAT_IMM_QWORD         4
+#define INAT_IMM_PTR           5
+#define INAT_IMM_VWORD32       6
+#define INAT_IMM_VWORD         7
+
+/* Legacy prefix */
+#define INAT_PFX_OFFS  0
+#define INAT_PFX_BITS  4
+#define INAT_PFX_MAX    ((1 << INAT_PFX_BITS) - 1)
+#define INAT_PFX_MASK  (INAT_PFX_MAX << INAT_PFX_OFFS)
+/* Escape opcodes */
+#define INAT_ESC_OFFS  (INAT_PFX_OFFS + INAT_PFX_BITS)
+#define INAT_ESC_BITS  2
+#define INAT_ESC_MAX   ((1 << INAT_ESC_BITS) - 1)
+#define INAT_ESC_MASK  (INAT_ESC_MAX << INAT_ESC_OFFS)
+/* Group opcodes (1-16) */
+#define INAT_GRP_OFFS  (INAT_ESC_OFFS + INAT_ESC_BITS)
+#define INAT_GRP_BITS  5
+#define INAT_GRP_MAX   ((1 << INAT_GRP_BITS) - 1)
+#define INAT_GRP_MASK  (INAT_GRP_MAX << INAT_GRP_OFFS)
+/* Immediates */
+#define INAT_IMM_OFFS  (INAT_GRP_OFFS + INAT_GRP_BITS)
+#define INAT_IMM_BITS  3
+#define INAT_IMM_MASK  (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
+/* Flags */
+#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
+#define INAT_MODRM     (1 << (INAT_FLAG_OFFS))
+#define INAT_FORCE64   (1 << (INAT_FLAG_OFFS + 1))
+#define INAT_SCNDIMM   (1 << (INAT_FLAG_OFFS + 2))
+#define INAT_MOFFSET   (1 << (INAT_FLAG_OFFS + 3))
+#define INAT_VARIANT   (1 << (INAT_FLAG_OFFS + 4))
+#define INAT_VEXOK     (1 << (INAT_FLAG_OFFS + 5))
+#define INAT_VEXONLY   (1 << (INAT_FLAG_OFFS + 6))
+#define INAT_EVEXONLY  (1 << (INAT_FLAG_OFFS + 7))
+/* Attribute making macros for attribute tables */
+#define INAT_MAKE_PREFIX(pfx)  (pfx << INAT_PFX_OFFS)
+#define INAT_MAKE_ESCAPE(esc)  (esc << INAT_ESC_OFFS)
+#define INAT_MAKE_GROUP(grp)   ((grp << INAT_GRP_OFFS) | INAT_MODRM)
+#define INAT_MAKE_IMM(imm)     (imm << INAT_IMM_OFFS)
+
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE    0
+#define INAT_SEG_REG_DEFAULT   1
+#define INAT_SEG_REG_CS                2
+#define INAT_SEG_REG_SS                3
+#define INAT_SEG_REG_DS                4
+#define INAT_SEG_REG_ES                5
+#define INAT_SEG_REG_FS                6
+#define INAT_SEG_REG_GS                7
+
+/* Attribute search APIs */
+extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
+extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
+                                            int lpfx_id,
+                                            insn_attr_t esc_attr);
+extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
+                                           int lpfx_id,
+                                           insn_attr_t esc_attr);
+extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
+                                         insn_byte_t vex_m,
+                                         insn_byte_t vex_pp);
+
+/* Attribute checking functions */
+static inline int inat_is_legacy_prefix(insn_attr_t attr)
+{
+       attr &= INAT_PFX_MASK;
+       return attr && attr <= INAT_LGCPFX_MAX;
+}
+
+static inline int inat_is_address_size_prefix(insn_attr_t attr)
+{
+       return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
+}
+
+static inline int inat_is_operand_size_prefix(insn_attr_t attr)
+{
+       return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
+}
+
+static inline int inat_is_rex_prefix(insn_attr_t attr)
+{
+       return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
+}
+
+static inline int inat_last_prefix_id(insn_attr_t attr)
+{
+       if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
+               return 0;
+       else
+               return attr & INAT_PFX_MASK;
+}
+
+static inline int inat_is_vex_prefix(insn_attr_t attr)
+{
+       attr &= INAT_PFX_MASK;
+       return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
+              attr == INAT_PFX_EVEX;
+}
+
+static inline int inat_is_evex_prefix(insn_attr_t attr)
+{
+       return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
+}
+
+static inline int inat_is_vex3_prefix(insn_attr_t attr)
+{
+       return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
+}
+
+static inline int inat_is_escape(insn_attr_t attr)
+{
+       return attr & INAT_ESC_MASK;
+}
+
+static inline int inat_escape_id(insn_attr_t attr)
+{
+       return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
+}
+
+static inline int inat_is_group(insn_attr_t attr)
+{
+       return attr & INAT_GRP_MASK;
+}
+
+static inline int inat_group_id(insn_attr_t attr)
+{
+       return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
+}
+
+static inline int inat_group_common_attribute(insn_attr_t attr)
+{
+       return attr & ~INAT_GRP_MASK;
+}
+
+static inline int inat_has_immediate(insn_attr_t attr)
+{
+       return attr & INAT_IMM_MASK;
+}
+
+static inline int inat_immediate_size(insn_attr_t attr)
+{
+       return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
+}
+
+static inline int inat_has_modrm(insn_attr_t attr)
+{
+       return attr & INAT_MODRM;
+}
+
+static inline int inat_is_force64(insn_attr_t attr)
+{
+       return attr & INAT_FORCE64;
+}
+
+static inline int inat_has_second_immediate(insn_attr_t attr)
+{
+       return attr & INAT_SCNDIMM;
+}
+
+static inline int inat_has_moffset(insn_attr_t attr)
+{
+       return attr & INAT_MOFFSET;
+}
+
+static inline int inat_has_variant(insn_attr_t attr)
+{
+       return attr & INAT_VARIANT;
+}
+
+static inline int inat_accept_vex(insn_attr_t attr)
+{
+       return attr & INAT_VEXOK;
+}
+
+static inline int inat_must_vex(insn_attr_t attr)
+{
+       return attr & (INAT_VEXONLY | INAT_EVEXONLY);
+}
+
+static inline int inat_must_evex(insn_attr_t attr)
+{
+       return attr & INAT_EVEXONLY;
+}
+#endif
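
A minimal sketch of how the lookup and classification helpers declared above fit together, assuming the generated inat-tables.c and the inat.c copy are linked into the (user-space) binary, as objtool's build arranges; the 0x66 operand-size prefix is used only as a convenient test byte.

#include <stdio.h>
#include <asm/inat.h>

int main(void)
{
        insn_attr_t attr = inat_get_opcode_attribute(0x66);

        printf("legacy prefix: %d\n", inat_is_legacy_prefix(attr) != 0);
        printf("operand-size:  %d\n", inat_is_operand_size_prefix(attr) != 0);
        return 0;
}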
diff --git a/tools/objtool/arch/x86/include/asm/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h
new file mode 100644 (file)
index 0000000..cb3c20c
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_X86_INAT_TYPES_H
+#define _ASM_X86_INAT_TYPES_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+/* Instruction attributes */
+typedef unsigned int insn_attr_t;
+typedef unsigned char insn_byte_t;
+typedef signed int insn_value_t;
+
+#endif
diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
new file mode 100644 (file)
index 0000000..b3e32b0
--- /dev/null
@@ -0,0 +1,211 @@
+#ifndef _ASM_X86_INSN_H
+#define _ASM_X86_INSN_H
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ */
+
+/* insn_attr_t is defined in inat.h */
+#include <asm/inat.h>
+
+struct insn_field {
+       union {
+               insn_value_t value;
+               insn_byte_t bytes[4];
+       };
+       /* !0 if we've run insn_get_xxx() for this field */
+       unsigned char got;
+       unsigned char nbytes;
+};
+
+struct insn {
+       struct insn_field prefixes;     /*
+                                        * Prefixes
+                                        * prefixes.bytes[3]: last prefix
+                                        */
+       struct insn_field rex_prefix;   /* REX prefix */
+       struct insn_field vex_prefix;   /* VEX prefix */
+       struct insn_field opcode;       /*
+                                        * opcode.bytes[0]: opcode1
+                                        * opcode.bytes[1]: opcode2
+                                        * opcode.bytes[2]: opcode3
+                                        */
+       struct insn_field modrm;
+       struct insn_field sib;
+       struct insn_field displacement;
+       union {
+               struct insn_field immediate;
+               struct insn_field moffset1;     /* for 64bit MOV */
+               struct insn_field immediate1;   /* for 64bit imm or off16/32 */
+       };
+       union {
+               struct insn_field moffset2;     /* for 64bit MOV */
+               struct insn_field immediate2;   /* for 64bit imm or seg16 */
+       };
+
+       insn_attr_t attr;
+       unsigned char opnd_bytes;
+       unsigned char addr_bytes;
+       unsigned char length;
+       unsigned char x86_64;
+
+       const insn_byte_t *kaddr;       /* kernel address of insn to analyze */
+       const insn_byte_t *end_kaddr;   /* kernel address of last insn in buffer */
+       const insn_byte_t *next_byte;
+};
+
+#define MAX_INSN_SIZE  15
+
+#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
+
+#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
+#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
+#define X86_SIB_BASE(sib) ((sib) & 0x07)
+
+#define X86_REX_W(rex) ((rex) & 8)
+#define X86_REX_R(rex) ((rex) & 4)
+#define X86_REX_X(rex) ((rex) & 2)
+#define X86_REX_B(rex) ((rex) & 1)
+
+/* VEX bit flags  */
+#define X86_VEX_W(vex) ((vex) & 0x80)  /* VEX3 Byte2 */
+#define X86_VEX_R(vex) ((vex) & 0x80)  /* VEX2/3 Byte1 */
+#define X86_VEX_X(vex) ((vex) & 0x40)  /* VEX3 Byte1 */
+#define X86_VEX_B(vex) ((vex) & 0x20)  /* VEX3 Byte1 */
+#define X86_VEX_L(vex) ((vex) & 0x04)  /* VEX3 Byte2, VEX2 Byte1 */
+/* VEX bit fields */
+#define X86_EVEX_M(vex)        ((vex) & 0x03)          /* EVEX Byte1 */
+#define X86_VEX3_M(vex)        ((vex) & 0x1f)          /* VEX3 Byte1 */
+#define X86_VEX2_M     1                       /* VEX2.M always 1 */
+#define X86_VEX_V(vex) (((vex) & 0x78) >> 3)   /* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_P(vex) ((vex) & 0x03)          /* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_M_MAX  0x1f                    /* VEX3.M Maximum value */
+
+extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
+extern void insn_get_prefixes(struct insn *insn);
+extern void insn_get_opcode(struct insn *insn);
+extern void insn_get_modrm(struct insn *insn);
+extern void insn_get_sib(struct insn *insn);
+extern void insn_get_displacement(struct insn *insn);
+extern void insn_get_immediate(struct insn *insn);
+extern void insn_get_length(struct insn *insn);
+
+/* Attribute will be determined after getting ModRM (for opcode groups) */
+static inline void insn_get_attribute(struct insn *insn)
+{
+       insn_get_modrm(insn);
+}
+
+/* Instruction uses RIP-relative addressing */
+extern int insn_rip_relative(struct insn *insn);
+
+/* Init insn for kernel text */
+static inline void kernel_insn_init(struct insn *insn,
+                                   const void *kaddr, int buf_len)
+{
+#ifdef CONFIG_X86_64
+       insn_init(insn, kaddr, buf_len, 1);
+#else /* CONFIG_X86_32 */
+       insn_init(insn, kaddr, buf_len, 0);
+#endif
+}
+
+static inline int insn_is_avx(struct insn *insn)
+{
+       if (!insn->prefixes.got)
+               insn_get_prefixes(insn);
+       return (insn->vex_prefix.value != 0);
+}
+
+static inline int insn_is_evex(struct insn *insn)
+{
+       if (!insn->prefixes.got)
+               insn_get_prefixes(insn);
+       return (insn->vex_prefix.nbytes == 4);
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+       return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+               insn->displacement.got && insn->immediate.got;
+}
+
+static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
+{
+       if (insn->vex_prefix.nbytes == 2)       /* 2 bytes VEX */
+               return X86_VEX2_M;
+       else if (insn->vex_prefix.nbytes == 3)  /* 3 bytes VEX */
+               return X86_VEX3_M(insn->vex_prefix.bytes[1]);
+       else                                    /* EVEX */
+               return X86_EVEX_M(insn->vex_prefix.bytes[1]);
+}
+
+static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
+{
+       if (insn->vex_prefix.nbytes == 2)       /* 2 bytes VEX */
+               return X86_VEX_P(insn->vex_prefix.bytes[1]);
+       else
+               return X86_VEX_P(insn->vex_prefix.bytes[2]);
+}
+
+/* Get the last prefix id from last prefix or VEX prefix */
+static inline int insn_last_prefix_id(struct insn *insn)
+{
+       if (insn_is_avx(insn))
+               return insn_vex_p_bits(insn);   /* VEX_p is a SIMD prefix id */
+
+       if (insn->prefixes.bytes[3])
+               return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
+
+       return 0;
+}
+
+/* Offset of each field from kaddr */
+static inline int insn_offset_rex_prefix(struct insn *insn)
+{
+       return insn->prefixes.nbytes;
+}
+static inline int insn_offset_vex_prefix(struct insn *insn)
+{
+       return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
+}
+static inline int insn_offset_opcode(struct insn *insn)
+{
+       return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
+}
+static inline int insn_offset_modrm(struct insn *insn)
+{
+       return insn_offset_opcode(insn) + insn->opcode.nbytes;
+}
+static inline int insn_offset_sib(struct insn *insn)
+{
+       return insn_offset_modrm(insn) + insn->modrm.nbytes;
+}
+static inline int insn_offset_displacement(struct insn *insn)
+{
+       return insn_offset_sib(insn) + insn->sib.nbytes;
+}
+static inline int insn_offset_immediate(struct insn *insn)
+{
+       return insn_offset_displacement(insn) + insn->displacement.nbytes;
+}
+
+#endif /* _ASM_X86_INSN_H */
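
A minimal usage sketch for the decoder interface declared above, assuming a user-space build with the include path this patch adds and the lib/inat.c, lib/insn.c copies that decode.c now includes; the instruction bytes are just an example.

#include <stdio.h>
#include <asm/insn.h>

int main(void)
{
        /* mov $0x1,%eax (5 bytes); the rest of the buffer is padding */
        unsigned char buf[MAX_INSN_SIZE] = { 0xb8, 0x01, 0x00, 0x00, 0x00 };
        struct insn insn;

        insn_init(&insn, buf, sizeof(buf), /* x86_64 = */ 1);
        insn_get_length(&insn);         /* decodes all preceding fields too */

        if (!insn_complete(&insn))
                return 1;
        printf("length %d, opcode 0x%02x\n",
               insn.length, (unsigned int)insn.opcode.bytes[0]);
        return 0;
}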
diff --git a/tools/objtool/arch/x86/include/asm/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
new file mode 100644 (file)
index 0000000..9c9dc57
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED              0
+#define ORC_REG_PREV_SP                        1
+#define ORC_REG_DX                     2
+#define ORC_REG_DI                     3
+#define ORC_REG_BP                     4
+#define ORC_REG_SP                     5
+#define ORC_REG_R10                    6
+#define ORC_REG_R13                    7
+#define ORC_REG_BP_INDIRECT            8
+#define ORC_REG_SP_INDIRECT            9
+#define ORC_REG_MAX                    15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call).  Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct.  They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL                  0
+#define ORC_TYPE_REGS                  1
+#define ORC_TYPE_REGS_IRET             2
+#define UNWIND_HINT_TYPE_SAVE          3
+#define UNWIND_HINT_TYPE_RESTORE       4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard.  It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder.  It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address.  Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+       s16             sp_offset;
+       s16             bp_offset;
+       unsigned        sp_reg:4;
+       unsigned        bp_reg:4;
+       unsigned        type:2;
+} __packed;
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+       u32             ip;
+       s16             sp_offset;
+       u8              sp_reg;
+       u8              type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
diff --git a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
deleted file mode 100644 (file)
index b02a36b..0000000
+++ /dev/null
@@ -1,393 +0,0 @@
-#!/bin/awk -f
-# SPDX-License-Identifier: GPL-2.0
-# gen-insn-attr-x86.awk: Instruction attribute table generator
-# Written by Masami Hiramatsu <mhiramat@redhat.com>
-#
-# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
-
-# Awk implementation sanity check
-function check_awk_implement() {
-       if (sprintf("%x", 0) != "0")
-               return "Your awk has a printf-format problem."
-       return ""
-}
-
-# Clear working vars
-function clear_vars() {
-       delete table
-       delete lptable2
-       delete lptable1
-       delete lptable3
-       eid = -1 # escape id
-       gid = -1 # group id
-       aid = -1 # AVX id
-       tname = ""
-}
-
-BEGIN {
-       # Implementation error checking
-       awkchecked = check_awk_implement()
-       if (awkchecked != "") {
-               print "Error: " awkchecked > "/dev/stderr"
-               print "Please try to use gawk." > "/dev/stderr"
-               exit 1
-       }
-
-       # Setup generating tables
-       print "/* x86 opcode map generated from x86-opcode-map.txt */"
-       print "/* Do not change this code. */\n"
-       ggid = 1
-       geid = 1
-       gaid = 0
-       delete etable
-       delete gtable
-       delete atable
-
-       opnd_expr = "^[A-Za-z/]"
-       ext_expr = "^\\("
-       sep_expr = "^\\|$"
-       group_expr = "^Grp[0-9A-Za-z]+"
-
-       imm_expr = "^[IJAOL][a-z]"
-       imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
-       imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
-       imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
-       imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
-       imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
-       imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
-       imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
-       imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
-       imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
-       imm_flag["Ob"] = "INAT_MOFFSET"
-       imm_flag["Ov"] = "INAT_MOFFSET"
-       imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
-
-       modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
-       force64_expr = "\\([df]64\\)"
-       rex_expr = "^REX(\\.[XRWB]+)*"
-       fpu_expr = "^ESC" # TODO
-
-       lprefix1_expr = "\\((66|!F3)\\)"
-       lprefix2_expr = "\\(F3\\)"
-       lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
-       lprefix_expr = "\\((66|F2|F3)\\)"
-       max_lprefix = 4
-
-       # All opcodes starting with lower-case 'v', 'k' or with (v1) superscript
-       # accepts VEX prefix
-       vexok_opcode_expr = "^[vk].*"
-       vexok_expr = "\\(v1\\)"
-       # All opcodes with (v) superscript supports *only* VEX prefix
-       vexonly_expr = "\\(v\\)"
-       # All opcodes with (ev) superscript supports *only* EVEX prefix
-       evexonly_expr = "\\(ev\\)"
-
-       prefix_expr = "\\(Prefix\\)"
-       prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
-       prefix_num["REPNE"] = "INAT_PFX_REPNE"
-       prefix_num["REP/REPE"] = "INAT_PFX_REPE"
-       prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
-       prefix_num["XRELEASE"] = "INAT_PFX_REPE"
-       prefix_num["LOCK"] = "INAT_PFX_LOCK"
-       prefix_num["SEG=CS"] = "INAT_PFX_CS"
-       prefix_num["SEG=DS"] = "INAT_PFX_DS"
-       prefix_num["SEG=ES"] = "INAT_PFX_ES"
-       prefix_num["SEG=FS"] = "INAT_PFX_FS"
-       prefix_num["SEG=GS"] = "INAT_PFX_GS"
-       prefix_num["SEG=SS"] = "INAT_PFX_SS"
-       prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
-       prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
-       prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
-       prefix_num["EVEX"] = "INAT_PFX_EVEX"
-
-       clear_vars()
-}
-
-function semantic_error(msg) {
-       print "Semantic error at " NR ": " msg > "/dev/stderr"
-       exit 1
-}
-
-function debug(msg) {
-       print "DEBUG: " msg
-}
-
-function array_size(arr,   i,c) {
-       c = 0
-       for (i in arr)
-               c++
-       return c
-}
-
-/^Table:/ {
-       print "/* " $0 " */"
-       if (tname != "")
-               semantic_error("Hit Table: before EndTable:.");
-}
-
-/^Referrer:/ {
-       if (NF != 1) {
-               # escape opcode table
-               ref = ""
-               for (i = 2; i <= NF; i++)
-                       ref = ref $i
-               eid = escape[ref]
-               tname = sprintf("inat_escape_table_%d", eid)
-       }
-}
-
-/^AVXcode:/ {
-       if (NF != 1) {
-               # AVX/escape opcode table
-               aid = $2
-               if (gaid <= aid)
-                       gaid = aid + 1
-               if (tname == "")        # AVX only opcode table
-                       tname = sprintf("inat_avx_table_%d", $2)
-       }
-       if (aid == -1 && eid == -1)     # primary opcode table
-               tname = "inat_primary_table"
-}
-
-/^GrpTable:/ {
-       print "/* " $0 " */"
-       if (!($2 in group))
-               semantic_error("No group: " $2 )
-       gid = group[$2]
-       tname = "inat_group_table_" gid
-}
-
-function print_table(tbl,name,fmt,n)
-{
-       print "const insn_attr_t " name " = {"
-       for (i = 0; i < n; i++) {
-               id = sprintf(fmt, i)
-               if (tbl[id])
-                       print " [" id "] = " tbl[id] ","
-       }
-       print "};"
-}
-
-/^EndTable/ {
-       if (gid != -1) {
-               # print group tables
-               if (array_size(table) != 0) {
-                       print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
-                                   "0x%x", 8)
-                       gtable[gid,0] = tname
-               }
-               if (array_size(lptable1) != 0) {
-                       print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
-                                   "0x%x", 8)
-                       gtable[gid,1] = tname "_1"
-               }
-               if (array_size(lptable2) != 0) {
-                       print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
-                                   "0x%x", 8)
-                       gtable[gid,2] = tname "_2"
-               }
-               if (array_size(lptable3) != 0) {
-                       print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
-                                   "0x%x", 8)
-                       gtable[gid,3] = tname "_3"
-               }
-       } else {
-               # print primary/escaped tables
-               if (array_size(table) != 0) {
-                       print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
-                                   "0x%02x", 256)
-                       etable[eid,0] = tname
-                       if (aid >= 0)
-                               atable[aid,0] = tname
-               }
-               if (array_size(lptable1) != 0) {
-                       print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
-                                   "0x%02x", 256)
-                       etable[eid,1] = tname "_1"
-                       if (aid >= 0)
-                               atable[aid,1] = tname "_1"
-               }
-               if (array_size(lptable2) != 0) {
-                       print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
-                                   "0x%02x", 256)
-                       etable[eid,2] = tname "_2"
-                       if (aid >= 0)
-                               atable[aid,2] = tname "_2"
-               }
-               if (array_size(lptable3) != 0) {
-                       print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
-                                   "0x%02x", 256)
-                       etable[eid,3] = tname "_3"
-                       if (aid >= 0)
-                               atable[aid,3] = tname "_3"
-               }
-       }
-       print ""
-       clear_vars()
-}
-
-function add_flags(old,new) {
-       if (old && new)
-               return old " | " new
-       else if (old)
-               return old
-       else
-               return new
-}
-
-# convert operands to flags.
-function convert_operands(count,opnd,       i,j,imm,mod)
-{
-       imm = null
-       mod = null
-       for (j = 1; j <= count; j++) {
-               i = opnd[j]
-               if (match(i, imm_expr) == 1) {
-                       if (!imm_flag[i])
-                               semantic_error("Unknown imm opnd: " i)
-                       if (imm) {
-                               if (i != "Ib")
-                                       semantic_error("Second IMM error")
-                               imm = add_flags(imm, "INAT_SCNDIMM")
-                       } else
-                               imm = imm_flag[i]
-               } else if (match(i, modrm_expr))
-                       mod = "INAT_MODRM"
-       }
-       return add_flags(imm, mod)
-}
-
-/^[0-9a-f]+\:/ {
-       if (NR == 1)
-               next
-       # get index
-       idx = "0x" substr($1, 1, index($1,":") - 1)
-       if (idx in table)
-               semantic_error("Redefine " idx " in " tname)
-
-       # check if escaped opcode
-       if ("escape" == $2) {
-               if ($3 != "#")
-                       semantic_error("No escaped name")
-               ref = ""
-               for (i = 4; i <= NF; i++)
-                       ref = ref $i
-               if (ref in escape)
-                       semantic_error("Redefine escape (" ref ")")
-               escape[ref] = geid
-               geid++
-               table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
-               next
-       }
-
-       variant = null
-       # converts
-       i = 2
-       while (i <= NF) {
-               opcode = $(i++)
-               delete opnds
-               ext = null
-               flags = null
-               opnd = null
-               # parse one opcode
-               if (match($i, opnd_expr)) {
-                       opnd = $i
-                       count = split($(i++), opnds, ",")
-                       flags = convert_operands(count, opnds)
-               }
-               if (match($i, ext_expr))
-                       ext = $(i++)
-               if (match($i, sep_expr))
-                       i++
-               else if (i < NF)
-                       semantic_error($i " is not a separator")
-
-               # check if group opcode
-               if (match(opcode, group_expr)) {
-                       if (!(opcode in group)) {
-                               group[opcode] = ggid
-                               ggid++
-                       }
-                       flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
-               }
-               # check force(or default) 64bit
-               if (match(ext, force64_expr))
-                       flags = add_flags(flags, "INAT_FORCE64")
-
-               # check REX prefix
-               if (match(opcode, rex_expr))
-                       flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
-
-               # check coprocessor escape : TODO
-               if (match(opcode, fpu_expr))
-                       flags = add_flags(flags, "INAT_MODRM")
-
-               # check VEX codes
-               if (match(ext, evexonly_expr))
-                       flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
-               else if (match(ext, vexonly_expr))
-                       flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
-               else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
-                       flags = add_flags(flags, "INAT_VEXOK")
-
-               # check prefixes
-               if (match(ext, prefix_expr)) {
-                       if (!prefix_num[opcode])
-                               semantic_error("Unknown prefix: " opcode)
-                       flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
-               }
-               if (length(flags) == 0)
-                       continue
-               # check if last prefix
-               if (match(ext, lprefix1_expr)) {
-                       lptable1[idx] = add_flags(lptable1[idx],flags)
-                       variant = "INAT_VARIANT"
-               }
-               if (match(ext, lprefix2_expr)) {
-                       lptable2[idx] = add_flags(lptable2[idx],flags)
-                       variant = "INAT_VARIANT"
-               }
-               if (match(ext, lprefix3_expr)) {
-                       lptable3[idx] = add_flags(lptable3[idx],flags)
-                       variant = "INAT_VARIANT"
-               }
-               if (!match(ext, lprefix_expr)){
-                       table[idx] = add_flags(table[idx],flags)
-               }
-       }
-       if (variant)
-               table[idx] = add_flags(table[idx],variant)
-}
-
-END {
-       if (awkchecked != "")
-               exit 1
-       # print escape opcode map's array
-       print "/* Escape opcode map array */"
-       print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
-             "[INAT_LSTPFX_MAX + 1] = {"
-       for (i = 0; i < geid; i++)
-               for (j = 0; j < max_lprefix; j++)
-                       if (etable[i,j])
-                               print " ["i"]["j"] = "etable[i,j]","
-       print "};\n"
-       # print group opcode map's array
-       print "/* Group opcode map array */"
-       print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
-             "[INAT_LSTPFX_MAX + 1] = {"
-       for (i = 0; i < ggid; i++)
-               for (j = 0; j < max_lprefix; j++)
-                       if (gtable[i,j])
-                               print " ["i"]["j"] = "gtable[i,j]","
-       print "};\n"
-       # print AVX opcode map's array
-       print "/* AVX opcode map array */"
-       print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
-             "[INAT_LSTPFX_MAX + 1] = {"
-       for (i = 0; i < gaid; i++)
-               for (j = 0; j < max_lprefix; j++)
-                       if (atable[i,j])
-                               print " ["i"]["j"] = "atable[i,j]","
-       print "};"
-}
-
diff --git a/tools/objtool/arch/x86/insn/inat.c b/tools/objtool/arch/x86/insn/inat.c
deleted file mode 100644 (file)
index e4bf28e..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * x86 instruction attribute tables
- *
- * Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#include "insn.h"
-
-/* Attribute tables are generated from opcode map */
-#include "inat-tables.c"
-
-/* Attribute search APIs */
-insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
-{
-       return inat_primary_table[opcode];
-}
-
-int inat_get_last_prefix_id(insn_byte_t last_pfx)
-{
-       insn_attr_t lpfx_attr;
-
-       lpfx_attr = inat_get_opcode_attribute(last_pfx);
-       return inat_last_prefix_id(lpfx_attr);
-}
-
-insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
-                                     insn_attr_t esc_attr)
-{
-       const insn_attr_t *table;
-       int n;
-
-       n = inat_escape_id(esc_attr);
-
-       table = inat_escape_tables[n][0];
-       if (!table)
-               return 0;
-       if (inat_has_variant(table[opcode]) && lpfx_id) {
-               table = inat_escape_tables[n][lpfx_id];
-               if (!table)
-                       return 0;
-       }
-       return table[opcode];
-}
-
-insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
-                                    insn_attr_t grp_attr)
-{
-       const insn_attr_t *table;
-       int n;
-
-       n = inat_group_id(grp_attr);
-
-       table = inat_group_tables[n][0];
-       if (!table)
-               return inat_group_common_attribute(grp_attr);
-       if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
-               table = inat_group_tables[n][lpfx_id];
-               if (!table)
-                       return inat_group_common_attribute(grp_attr);
-       }
-       return table[X86_MODRM_REG(modrm)] |
-              inat_group_common_attribute(grp_attr);
-}
-
-insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
-                                  insn_byte_t vex_p)
-{
-       const insn_attr_t *table;
-       if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
-               return 0;
-       /* At first, this checks the master table */
-       table = inat_avx_tables[vex_m][0];
-       if (!table)
-               return 0;
-       if (!inat_is_group(table[opcode]) && vex_p) {
-               /* If this is not a group, get attribute directly */
-               table = inat_avx_tables[vex_m][vex_p];
-               if (!table)
-                       return 0;
-       }
-       return table[opcode];
-}
-
diff --git a/tools/objtool/arch/x86/insn/inat.h b/tools/objtool/arch/x86/insn/inat.h
deleted file mode 100644 (file)
index 125ecd2..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-#ifndef _ASM_X86_INAT_H
-#define _ASM_X86_INAT_H
-/*
- * x86 instruction attributes
- *
- * Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#include "inat_types.h"
-
-/*
- * Internal bits. Don't use bitmasks directly, because these bits are
- * unstable. You should use checking functions.
- */
-
-#define INAT_OPCODE_TABLE_SIZE 256
-#define INAT_GROUP_TABLE_SIZE 8
-
-/* Legacy last prefixes */
-#define INAT_PFX_OPNDSZ        1       /* 0x66 */ /* LPFX1 */
-#define INAT_PFX_REPE  2       /* 0xF3 */ /* LPFX2 */
-#define INAT_PFX_REPNE 3       /* 0xF2 */ /* LPFX3 */
-/* Other Legacy prefixes */
-#define INAT_PFX_LOCK  4       /* 0xF0 */
-#define INAT_PFX_CS    5       /* 0x2E */
-#define INAT_PFX_DS    6       /* 0x3E */
-#define INAT_PFX_ES    7       /* 0x26 */
-#define INAT_PFX_FS    8       /* 0x64 */
-#define INAT_PFX_GS    9       /* 0x65 */
-#define INAT_PFX_SS    10      /* 0x36 */
-#define INAT_PFX_ADDRSZ        11      /* 0x67 */
-/* x86-64 REX prefix */
-#define INAT_PFX_REX   12      /* 0x4X */
-/* AVX VEX prefixes */
-#define INAT_PFX_VEX2  13      /* 2-bytes VEX prefix */
-#define INAT_PFX_VEX3  14      /* 3-bytes VEX prefix */
-#define INAT_PFX_EVEX  15      /* EVEX prefix */
-
-#define INAT_LSTPFX_MAX        3
-#define INAT_LGCPFX_MAX        11
-
-/* Immediate size */
-#define INAT_IMM_BYTE          1
-#define INAT_IMM_WORD          2
-#define INAT_IMM_DWORD         3
-#define INAT_IMM_QWORD         4
-#define INAT_IMM_PTR           5
-#define INAT_IMM_VWORD32       6
-#define INAT_IMM_VWORD         7
-
-/* Legacy prefix */
-#define INAT_PFX_OFFS  0
-#define INAT_PFX_BITS  4
-#define INAT_PFX_MAX    ((1 << INAT_PFX_BITS) - 1)
-#define INAT_PFX_MASK  (INAT_PFX_MAX << INAT_PFX_OFFS)
-/* Escape opcodes */
-#define INAT_ESC_OFFS  (INAT_PFX_OFFS + INAT_PFX_BITS)
-#define INAT_ESC_BITS  2
-#define INAT_ESC_MAX   ((1 << INAT_ESC_BITS) - 1)
-#define INAT_ESC_MASK  (INAT_ESC_MAX << INAT_ESC_OFFS)
-/* Group opcodes (1-16) */
-#define INAT_GRP_OFFS  (INAT_ESC_OFFS + INAT_ESC_BITS)
-#define INAT_GRP_BITS  5
-#define INAT_GRP_MAX   ((1 << INAT_GRP_BITS) - 1)
-#define INAT_GRP_MASK  (INAT_GRP_MAX << INAT_GRP_OFFS)
-/* Immediates */
-#define INAT_IMM_OFFS  (INAT_GRP_OFFS + INAT_GRP_BITS)
-#define INAT_IMM_BITS  3
-#define INAT_IMM_MASK  (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
-/* Flags */
-#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
-#define INAT_MODRM     (1 << (INAT_FLAG_OFFS))
-#define INAT_FORCE64   (1 << (INAT_FLAG_OFFS + 1))
-#define INAT_SCNDIMM   (1 << (INAT_FLAG_OFFS + 2))
-#define INAT_MOFFSET   (1 << (INAT_FLAG_OFFS + 3))
-#define INAT_VARIANT   (1 << (INAT_FLAG_OFFS + 4))
-#define INAT_VEXOK     (1 << (INAT_FLAG_OFFS + 5))
-#define INAT_VEXONLY   (1 << (INAT_FLAG_OFFS + 6))
-#define INAT_EVEXONLY  (1 << (INAT_FLAG_OFFS + 7))
-/* Attribute making macros for attribute tables */
-#define INAT_MAKE_PREFIX(pfx)  (pfx << INAT_PFX_OFFS)
-#define INAT_MAKE_ESCAPE(esc)  (esc << INAT_ESC_OFFS)
-#define INAT_MAKE_GROUP(grp)   ((grp << INAT_GRP_OFFS) | INAT_MODRM)
-#define INAT_MAKE_IMM(imm)     (imm << INAT_IMM_OFFS)
-
-/* Attribute search APIs */
-extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
-extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
-extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
-                                            int lpfx_id,
-                                            insn_attr_t esc_attr);
-extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
-                                           int lpfx_id,
-                                           insn_attr_t esc_attr);
-extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
-                                         insn_byte_t vex_m,
-                                         insn_byte_t vex_pp);
-
-/* Attribute checking functions */
-static inline int inat_is_legacy_prefix(insn_attr_t attr)
-{
-       attr &= INAT_PFX_MASK;
-       return attr && attr <= INAT_LGCPFX_MAX;
-}
-
-static inline int inat_is_address_size_prefix(insn_attr_t attr)
-{
-       return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
-}
-
-static inline int inat_is_operand_size_prefix(insn_attr_t attr)
-{
-       return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
-}
-
-static inline int inat_is_rex_prefix(insn_attr_t attr)
-{
-       return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
-}
-
-static inline int inat_last_prefix_id(insn_attr_t attr)
-{
-       if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
-               return 0;
-       else
-               return attr & INAT_PFX_MASK;
-}
-
-static inline int inat_is_vex_prefix(insn_attr_t attr)
-{
-       attr &= INAT_PFX_MASK;
-       return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
-              attr == INAT_PFX_EVEX;
-}
-
-static inline int inat_is_evex_prefix(insn_attr_t attr)
-{
-       return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
-}
-
-static inline int inat_is_vex3_prefix(insn_attr_t attr)
-{
-       return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
-}
-
-static inline int inat_is_escape(insn_attr_t attr)
-{
-       return attr & INAT_ESC_MASK;
-}
-
-static inline int inat_escape_id(insn_attr_t attr)
-{
-       return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
-}
-
-static inline int inat_is_group(insn_attr_t attr)
-{
-       return attr & INAT_GRP_MASK;
-}
-
-static inline int inat_group_id(insn_attr_t attr)
-{
-       return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
-}
-
-static inline int inat_group_common_attribute(insn_attr_t attr)
-{
-       return attr & ~INAT_GRP_MASK;
-}
-
-static inline int inat_has_immediate(insn_attr_t attr)
-{
-       return attr & INAT_IMM_MASK;
-}
-
-static inline int inat_immediate_size(insn_attr_t attr)
-{
-       return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
-}
-
-static inline int inat_has_modrm(insn_attr_t attr)
-{
-       return attr & INAT_MODRM;
-}
-
-static inline int inat_is_force64(insn_attr_t attr)
-{
-       return attr & INAT_FORCE64;
-}
-
-static inline int inat_has_second_immediate(insn_attr_t attr)
-{
-       return attr & INAT_SCNDIMM;
-}
-
-static inline int inat_has_moffset(insn_attr_t attr)
-{
-       return attr & INAT_MOFFSET;
-}
-
-static inline int inat_has_variant(insn_attr_t attr)
-{
-       return attr & INAT_VARIANT;
-}
-
-static inline int inat_accept_vex(insn_attr_t attr)
-{
-       return attr & INAT_VEXOK;
-}
-
-static inline int inat_must_vex(insn_attr_t attr)
-{
-       return attr & (INAT_VEXONLY | INAT_EVEXONLY);
-}
-
-static inline int inat_must_evex(insn_attr_t attr)
-{
-       return attr & INAT_EVEXONLY;
-}
-#endif
diff --git a/tools/objtool/arch/x86/insn/inat_types.h b/tools/objtool/arch/x86/insn/inat_types.h
deleted file mode 100644 (file)
index cb3c20c..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _ASM_X86_INAT_TYPES_H
-#define _ASM_X86_INAT_TYPES_H
-/*
- * x86 instruction attributes
- *
- * Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-
-/* Instruction attributes */
-typedef unsigned int insn_attr_t;
-typedef unsigned char insn_byte_t;
-typedef signed int insn_value_t;
-
-#endif
diff --git a/tools/objtool/arch/x86/insn/insn.c b/tools/objtool/arch/x86/insn/insn.c
deleted file mode 100644 (file)
index ca983e2..0000000
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * x86 instruction analysis
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2002, 2004, 2009
- */
-
-#ifdef __KERNEL__
-#include <linux/string.h>
-#else
-#include <string.h>
-#endif
-#include "inat.h"
-#include "insn.h"
-
-/* Verify next sizeof(t) bytes can be on the same instruction */
-#define validate_next(t, insn, n)      \
-       ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
-
-#define __get_next(t, insn)    \
-       ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
-
-#define __peek_nbyte_next(t, insn, n)  \
-       ({ t r = *(t*)((insn)->next_byte + n); r; })
-
-#define get_next(t, insn)      \
-       ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
-
-#define peek_nbyte_next(t, insn, n)    \
-       ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
-
-#define peek_next(t, insn)     peek_nbyte_next(t, insn, 0)
-
-/**
- * insn_init() - initialize struct insn
- * @insn:      &struct insn to be initialized
- * @kaddr:     address (in kernel memory) of instruction (or copy thereof)
- * @buf_len:   length of the instruction buffer at @kaddr
- * @x86_64:    !0 for 64-bit kernel or 64-bit app
- */
-void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
-{
-       /*
-        * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
-        * even if the input buffer is long enough to hold them.
-        */
-       if (buf_len > MAX_INSN_SIZE)
-               buf_len = MAX_INSN_SIZE;
-
-       memset(insn, 0, sizeof(*insn));
-       insn->kaddr = kaddr;
-       insn->end_kaddr = kaddr + buf_len;
-       insn->next_byte = kaddr;
-       insn->x86_64 = x86_64 ? 1 : 0;
-       insn->opnd_bytes = 4;
-       if (x86_64)
-               insn->addr_bytes = 8;
-       else
-               insn->addr_bytes = 4;
-}
-
-/**
- * insn_get_prefixes - scan x86 instruction prefix bytes
- * @insn:      &struct insn containing instruction
- *
- * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
- * to point to the (first) opcode.  No effect if @insn->prefixes.got
- * is already set.
- */
-void insn_get_prefixes(struct insn *insn)
-{
-       struct insn_field *prefixes = &insn->prefixes;
-       insn_attr_t attr;
-       insn_byte_t b, lb;
-       int i, nb;
-
-       if (prefixes->got)
-               return;
-
-       nb = 0;
-       lb = 0;
-       b = peek_next(insn_byte_t, insn);
-       attr = inat_get_opcode_attribute(b);
-       while (inat_is_legacy_prefix(attr)) {
-               /* Skip if same prefix */
-               for (i = 0; i < nb; i++)
-                       if (prefixes->bytes[i] == b)
-                               goto found;
-               if (nb == 4)
-                       /* Invalid instruction */
-                       break;
-               prefixes->bytes[nb++] = b;
-               if (inat_is_address_size_prefix(attr)) {
-                       /* address size switches 2/4 or 4/8 */
-                       if (insn->x86_64)
-                               insn->addr_bytes ^= 12;
-                       else
-                               insn->addr_bytes ^= 6;
-               } else if (inat_is_operand_size_prefix(attr)) {
-                       /* operand size switches 2/4 */
-                       insn->opnd_bytes ^= 6;
-               }
-found:
-               prefixes->nbytes++;
-               insn->next_byte++;
-               lb = b;
-               b = peek_next(insn_byte_t, insn);
-               attr = inat_get_opcode_attribute(b);
-       }
-       /* Set the last prefix */
-       if (lb && lb != insn->prefixes.bytes[3]) {
-               if (unlikely(insn->prefixes.bytes[3])) {
-                       /* Swap the last prefix */
-                       b = insn->prefixes.bytes[3];
-                       for (i = 0; i < nb; i++)
-                               if (prefixes->bytes[i] == lb)
-                                       prefixes->bytes[i] = b;
-               }
-               insn->prefixes.bytes[3] = lb;
-       }
-
-       /* Decode REX prefix */
-       if (insn->x86_64) {
-               b = peek_next(insn_byte_t, insn);
-               attr = inat_get_opcode_attribute(b);
-               if (inat_is_rex_prefix(attr)) {
-                       insn->rex_prefix.value = b;
-                       insn->rex_prefix.nbytes = 1;
-                       insn->next_byte++;
-                       if (X86_REX_W(b))
-                               /* REX.W overrides opnd_size */
-                               insn->opnd_bytes = 8;
-               }
-       }
-       insn->rex_prefix.got = 1;
-
-       /* Decode VEX prefix */
-       b = peek_next(insn_byte_t, insn);
-       attr = inat_get_opcode_attribute(b);
-       if (inat_is_vex_prefix(attr)) {
-               insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
-               if (!insn->x86_64) {
-                       /*
-                        * In 32-bit mode, if the [7:6] bits (mod bits of
-                        * ModRM) on the second byte are not 11b, it is
-                        * LDS or LES or BOUND.
-                        */
-                       if (X86_MODRM_MOD(b2) != 3)
-                               goto vex_end;
-               }
-               insn->vex_prefix.bytes[0] = b;
-               insn->vex_prefix.bytes[1] = b2;
-               if (inat_is_evex_prefix(attr)) {
-                       b2 = peek_nbyte_next(insn_byte_t, insn, 2);
-                       insn->vex_prefix.bytes[2] = b2;
-                       b2 = peek_nbyte_next(insn_byte_t, insn, 3);
-                       insn->vex_prefix.bytes[3] = b2;
-                       insn->vex_prefix.nbytes = 4;
-                       insn->next_byte += 4;
-                       if (insn->x86_64 && X86_VEX_W(b2))
-                               /* VEX.W overrides opnd_size */
-                               insn->opnd_bytes = 8;
-               } else if (inat_is_vex3_prefix(attr)) {
-                       b2 = peek_nbyte_next(insn_byte_t, insn, 2);
-                       insn->vex_prefix.bytes[2] = b2;
-                       insn->vex_prefix.nbytes = 3;
-                       insn->next_byte += 3;
-                       if (insn->x86_64 && X86_VEX_W(b2))
-                               /* VEX.W overrides opnd_size */
-                               insn->opnd_bytes = 8;
-               } else {
-                       /*
-                        * For VEX2, fake VEX3-like byte#2.
-                        * Makes it easier to decode vex.W, vex.vvvv,
-                        * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
-                        */
-                       insn->vex_prefix.bytes[2] = b2 & 0x7f;
-                       insn->vex_prefix.nbytes = 2;
-                       insn->next_byte += 2;
-               }
-       }
-vex_end:
-       insn->vex_prefix.got = 1;
-
-       prefixes->got = 1;
-
-err_out:
-       return;
-}
-
-/**
- * insn_get_opcode - collect opcode(s)
- * @insn:      &struct insn containing instruction
- *
- * Populates @insn->opcode, updates @insn->next_byte to point past the
- * opcode byte(s), and sets @insn->attr (except for groups).
- * If necessary, first collects any preceding (prefix) bytes.
- * Sets @insn->opcode.value = opcode1.  No effect if @insn->opcode.got
- * is already 1.
- */
-void insn_get_opcode(struct insn *insn)
-{
-       struct insn_field *opcode = &insn->opcode;
-       insn_byte_t op;
-       int pfx_id;
-       if (opcode->got)
-               return;
-       if (!insn->prefixes.got)
-               insn_get_prefixes(insn);
-
-       /* Get first opcode */
-       op = get_next(insn_byte_t, insn);
-       opcode->bytes[0] = op;
-       opcode->nbytes = 1;
-
-       /* Check if there is VEX prefix or not */
-       if (insn_is_avx(insn)) {
-               insn_byte_t m, p;
-               m = insn_vex_m_bits(insn);
-               p = insn_vex_p_bits(insn);
-               insn->attr = inat_get_avx_attribute(op, m, p);
-               if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
-                   (!inat_accept_vex(insn->attr) &&
-                    !inat_is_group(insn->attr)))
-                       insn->attr = 0; /* This instruction is bad */
-               goto end;       /* VEX has only 1 byte for opcode */
-       }
-
-       insn->attr = inat_get_opcode_attribute(op);
-       while (inat_is_escape(insn->attr)) {
-               /* Get escaped opcode */
-               op = get_next(insn_byte_t, insn);
-               opcode->bytes[opcode->nbytes++] = op;
-               pfx_id = insn_last_prefix_id(insn);
-               insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
-       }
-       if (inat_must_vex(insn->attr))
-               insn->attr = 0; /* This instruction is bad */
-end:
-       opcode->got = 1;
-
-err_out:
-       return;
-}
-
-/**
- * insn_get_modrm - collect ModRM byte, if any
- * @insn:      &struct insn containing instruction
- *
- * Populates @insn->modrm and updates @insn->next_byte to point past the
- * ModRM byte, if any.  If necessary, first collects the preceding bytes
- * (prefixes and opcode(s)).  No effect if @insn->modrm.got is already 1.
- */
-void insn_get_modrm(struct insn *insn)
-{
-       struct insn_field *modrm = &insn->modrm;
-       insn_byte_t pfx_id, mod;
-       if (modrm->got)
-               return;
-       if (!insn->opcode.got)
-               insn_get_opcode(insn);
-
-       if (inat_has_modrm(insn->attr)) {
-               mod = get_next(insn_byte_t, insn);
-               modrm->value = mod;
-               modrm->nbytes = 1;
-               if (inat_is_group(insn->attr)) {
-                       pfx_id = insn_last_prefix_id(insn);
-                       insn->attr = inat_get_group_attribute(mod, pfx_id,
-                                                             insn->attr);
-                       if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
-                               insn->attr = 0; /* This is bad */
-               }
-       }
-
-       if (insn->x86_64 && inat_is_force64(insn->attr))
-               insn->opnd_bytes = 8;
-       modrm->got = 1;
-
-err_out:
-       return;
-}
-
-
-/**
- * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
- * @insn:      &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * ModRM byte.  No effect if @insn->x86_64 is 0.
- */
-int insn_rip_relative(struct insn *insn)
-{
-       struct insn_field *modrm = &insn->modrm;
-
-       if (!insn->x86_64)
-               return 0;
-       if (!modrm->got)
-               insn_get_modrm(insn);
-       /*
-        * For rip-relative instructions, the mod field (top 2 bits)
-        * is zero and the r/m field (bottom 3 bits) is 0x5.
-        */
-       return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
-}
-
-/**
- * insn_get_sib() - Get the SIB byte of instruction
- * @insn:      &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * ModRM byte.
- */
-void insn_get_sib(struct insn *insn)
-{
-       insn_byte_t modrm;
-
-       if (insn->sib.got)
-               return;
-       if (!insn->modrm.got)
-               insn_get_modrm(insn);
-       if (insn->modrm.nbytes) {
-               modrm = (insn_byte_t)insn->modrm.value;
-               if (insn->addr_bytes != 2 &&
-                   X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
-                       insn->sib.value = get_next(insn_byte_t, insn);
-                       insn->sib.nbytes = 1;
-               }
-       }
-       insn->sib.got = 1;
-
-err_out:
-       return;
-}
-
-
-/**
- * insn_get_displacement() - Get the displacement of instruction
- * @insn:      &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * SIB byte.
- * Displacement value is sign-expanded.
- */
-void insn_get_displacement(struct insn *insn)
-{
-       insn_byte_t mod, rm, base;
-
-       if (insn->displacement.got)
-               return;
-       if (!insn->sib.got)
-               insn_get_sib(insn);
-       if (insn->modrm.nbytes) {
-               /*
-                * Interpreting the modrm byte:
-                * mod = 00 - no displacement fields (exceptions below)
-                * mod = 01 - 1-byte displacement field
-                * mod = 10 - displacement field is 4 bytes, or 2 bytes if
-                *      address size = 2 (0x67 prefix in 32-bit mode)
-                * mod = 11 - no memory operand
-                *
-                * If address size = 2...
-                * mod = 00, r/m = 110 - displacement field is 2 bytes
-                *
-                * If address size != 2...
-                * mod != 11, r/m = 100 - SIB byte exists
-                * mod = 00, SIB base = 101 - displacement field is 4 bytes
-                * mod = 00, r/m = 101 - rip-relative addressing, displacement
-                *      field is 4 bytes
-                */
-               mod = X86_MODRM_MOD(insn->modrm.value);
-               rm = X86_MODRM_RM(insn->modrm.value);
-               base = X86_SIB_BASE(insn->sib.value);
-               if (mod == 3)
-                       goto out;
-               if (mod == 1) {
-                       insn->displacement.value = get_next(signed char, insn);
-                       insn->displacement.nbytes = 1;
-               } else if (insn->addr_bytes == 2) {
-                       if ((mod == 0 && rm == 6) || mod == 2) {
-                               insn->displacement.value =
-                                        get_next(short, insn);
-                               insn->displacement.nbytes = 2;
-                       }
-               } else {
-                       if ((mod == 0 && rm == 5) || mod == 2 ||
-                           (mod == 0 && base == 5)) {
-                               insn->displacement.value = get_next(int, insn);
-                               insn->displacement.nbytes = 4;
-                       }
-               }
-       }
-out:
-       insn->displacement.got = 1;
-
-err_out:
-       return;
-}
-
-/* Decode moffset16/32/64. Return 0 if failed */
-static int __get_moffset(struct insn *insn)
-{
-       switch (insn->addr_bytes) {
-       case 2:
-               insn->moffset1.value = get_next(short, insn);
-               insn->moffset1.nbytes = 2;
-               break;
-       case 4:
-               insn->moffset1.value = get_next(int, insn);
-               insn->moffset1.nbytes = 4;
-               break;
-       case 8:
-               insn->moffset1.value = get_next(int, insn);
-               insn->moffset1.nbytes = 4;
-               insn->moffset2.value = get_next(int, insn);
-               insn->moffset2.nbytes = 4;
-               break;
-       default:        /* opnd_bytes must be modified manually */
-               goto err_out;
-       }
-       insn->moffset1.got = insn->moffset2.got = 1;
-
-       return 1;
-
-err_out:
-       return 0;
-}
-
-/* Decode imm v32(Iz). Return 0 if failed */
-static int __get_immv32(struct insn *insn)
-{
-       switch (insn->opnd_bytes) {
-       case 2:
-               insn->immediate.value = get_next(short, insn);
-               insn->immediate.nbytes = 2;
-               break;
-       case 4:
-       case 8:
-               insn->immediate.value = get_next(int, insn);
-               insn->immediate.nbytes = 4;
-               break;
-       default:        /* opnd_bytes must be modified manually */
-               goto err_out;
-       }
-
-       return 1;
-
-err_out:
-       return 0;
-}
-
-/* Decode imm v64(Iv/Ov), Return 0 if failed */
-static int __get_immv(struct insn *insn)
-{
-       switch (insn->opnd_bytes) {
-       case 2:
-               insn->immediate1.value = get_next(short, insn);
-               insn->immediate1.nbytes = 2;
-               break;
-       case 4:
-               insn->immediate1.value = get_next(int, insn);
-               insn->immediate1.nbytes = 4;
-               break;
-       case 8:
-               insn->immediate1.value = get_next(int, insn);
-               insn->immediate1.nbytes = 4;
-               insn->immediate2.value = get_next(int, insn);
-               insn->immediate2.nbytes = 4;
-               break;
-       default:        /* opnd_bytes must be modified manually */
-               goto err_out;
-       }
-       insn->immediate1.got = insn->immediate2.got = 1;
-
-       return 1;
-err_out:
-       return 0;
-}
-
-/* Decode ptr16:16/32(Ap) */
-static int __get_immptr(struct insn *insn)
-{
-       switch (insn->opnd_bytes) {
-       case 2:
-               insn->immediate1.value = get_next(short, insn);
-               insn->immediate1.nbytes = 2;
-               break;
-       case 4:
-               insn->immediate1.value = get_next(int, insn);
-               insn->immediate1.nbytes = 4;
-               break;
-       case 8:
-               /* ptr16:64 does not exist (no segment) */
-               return 0;
-       default:        /* opnd_bytes must be modified manually */
-               goto err_out;
-       }
-       insn->immediate2.value = get_next(unsigned short, insn);
-       insn->immediate2.nbytes = 2;
-       insn->immediate1.got = insn->immediate2.got = 1;
-
-       return 1;
-err_out:
-       return 0;
-}
-
-/**
- * insn_get_immediate() - Get the immediates of instruction
- * @insn:      &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * displacement bytes.
- * Most immediates are sign-expanded. The unsigned value can be obtained
- * by masking with ((1 << (nbytes * 8)) - 1).
- */
-void insn_get_immediate(struct insn *insn)
-{
-       if (insn->immediate.got)
-               return;
-       if (!insn->displacement.got)
-               insn_get_displacement(insn);
-
-       if (inat_has_moffset(insn->attr)) {
-               if (!__get_moffset(insn))
-                       goto err_out;
-               goto done;
-       }
-
-       if (!inat_has_immediate(insn->attr))
-               /* no immediates */
-               goto done;
-
-       switch (inat_immediate_size(insn->attr)) {
-       case INAT_IMM_BYTE:
-               insn->immediate.value = get_next(signed char, insn);
-               insn->immediate.nbytes = 1;
-               break;
-       case INAT_IMM_WORD:
-               insn->immediate.value = get_next(short, insn);
-               insn->immediate.nbytes = 2;
-               break;
-       case INAT_IMM_DWORD:
-               insn->immediate.value = get_next(int, insn);
-               insn->immediate.nbytes = 4;
-               break;
-       case INAT_IMM_QWORD:
-               insn->immediate1.value = get_next(int, insn);
-               insn->immediate1.nbytes = 4;
-               insn->immediate2.value = get_next(int, insn);
-               insn->immediate2.nbytes = 4;
-               break;
-       case INAT_IMM_PTR:
-               if (!__get_immptr(insn))
-                       goto err_out;
-               break;
-       case INAT_IMM_VWORD32:
-               if (!__get_immv32(insn))
-                       goto err_out;
-               break;
-       case INAT_IMM_VWORD:
-               if (!__get_immv(insn))
-                       goto err_out;
-               break;
-       default:
-               /* Here, insn must have an immediate, but failed */
-               goto err_out;
-       }
-       if (inat_has_second_immediate(insn->attr)) {
-               insn->immediate2.value = get_next(signed char, insn);
-               insn->immediate2.nbytes = 1;
-       }
-done:
-       insn->immediate.got = 1;
-
-err_out:
-       return;
-}
-
-/**
- * insn_get_length() - Get the length of instruction
- * @insn:      &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * immediates bytes.
- */
-void insn_get_length(struct insn *insn)
-{
-       if (insn->length)
-               return;
-       if (!insn->immediate.got)
-               insn_get_immediate(insn);
-       insn->length = (unsigned char)((unsigned long)insn->next_byte
-                                    - (unsigned long)insn->kaddr);
-}
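
[Editor's note] End to end, a caller initializes a struct insn over a byte buffer and then asks for the deepest field it needs; each insn_get_*() stage above pulls in the earlier stages on demand, and insn_get_length() runs the whole pipeline. A minimal, hypothetical user-space sketch (not part of this commit, under the same build assumptions as above; the instruction bytes and main() are illustrative only):

    #include <stdio.h>
    #include "insn.h"

    int main(void)
    {
            /* 48 89 e5 = mov %rsp,%rbp in 64-bit mode */
            const unsigned char buf[] = { 0x48, 0x89, 0xe5 };
            struct insn insn;

            insn_init(&insn, buf, sizeof(buf), 1);
            insn_get_length(&insn);         /* decodes prefixes through immediates */

            printf("length=%u opcode=%02x modrm=%02x rip-relative=%d\n",
                   insn.length, insn.opcode.bytes[0], insn.modrm.bytes[0],
                   insn_rip_relative(&insn));
            return 0;
    }
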
diff --git a/tools/objtool/arch/x86/insn/insn.h b/tools/objtool/arch/x86/insn/insn.h
deleted file mode 100644 (file)
index e23578c..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-#ifndef _ASM_X86_INSN_H
-#define _ASM_X86_INSN_H
-/*
- * x86 instruction analysis
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2009
- */
-
-/* insn_attr_t is defined in inat.h */
-#include "inat.h"
-
-struct insn_field {
-       union {
-               insn_value_t value;
-               insn_byte_t bytes[4];
-       };
-       /* !0 if we've run insn_get_xxx() for this field */
-       unsigned char got;
-       unsigned char nbytes;
-};
-
-struct insn {
-       struct insn_field prefixes;     /*
-                                        * Prefixes
-                                        * prefixes.bytes[3]: last prefix
-                                        */
-       struct insn_field rex_prefix;   /* REX prefix */
-       struct insn_field vex_prefix;   /* VEX prefix */
-       struct insn_field opcode;       /*
-                                        * opcode.bytes[0]: opcode1
-                                        * opcode.bytes[1]: opcode2
-                                        * opcode.bytes[2]: opcode3
-                                        */
-       struct insn_field modrm;
-       struct insn_field sib;
-       struct insn_field displacement;
-       union {
-               struct insn_field immediate;
-               struct insn_field moffset1;     /* for 64bit MOV */
-               struct insn_field immediate1;   /* for 64bit imm or off16/32 */
-       };
-       union {
-               struct insn_field moffset2;     /* for 64bit MOV */
-               struct insn_field immediate2;   /* for 64bit imm or seg16 */
-       };
-
-       insn_attr_t attr;
-       unsigned char opnd_bytes;
-       unsigned char addr_bytes;
-       unsigned char length;
-       unsigned char x86_64;
-
-       const insn_byte_t *kaddr;       /* kernel address of insn to analyze */
-       const insn_byte_t *end_kaddr;   /* kernel address of last insn in buffer */
-       const insn_byte_t *next_byte;
-};
-
-#define MAX_INSN_SIZE  15
-
-#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
-#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
-#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
-
-#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
-#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
-#define X86_SIB_BASE(sib) ((sib) & 0x07)
-
-#define X86_REX_W(rex) ((rex) & 8)
-#define X86_REX_R(rex) ((rex) & 4)
-#define X86_REX_X(rex) ((rex) & 2)
-#define X86_REX_B(rex) ((rex) & 1)
-
-/* VEX bit flags  */
-#define X86_VEX_W(vex) ((vex) & 0x80)  /* VEX3 Byte2 */
-#define X86_VEX_R(vex) ((vex) & 0x80)  /* VEX2/3 Byte1 */
-#define X86_VEX_X(vex) ((vex) & 0x40)  /* VEX3 Byte1 */
-#define X86_VEX_B(vex) ((vex) & 0x20)  /* VEX3 Byte1 */
-#define X86_VEX_L(vex) ((vex) & 0x04)  /* VEX3 Byte2, VEX2 Byte1 */
-/* VEX bit fields */
-#define X86_EVEX_M(vex)        ((vex) & 0x03)          /* EVEX Byte1 */
-#define X86_VEX3_M(vex)        ((vex) & 0x1f)          /* VEX3 Byte1 */
-#define X86_VEX2_M     1                       /* VEX2.M always 1 */
-#define X86_VEX_V(vex) (((vex) & 0x78) >> 3)   /* VEX3 Byte2, VEX2 Byte1 */
-#define X86_VEX_P(vex) ((vex) & 0x03)          /* VEX3 Byte2, VEX2 Byte1 */
-#define X86_VEX_M_MAX  0x1f                    /* VEX3.M Maximum value */
-
-extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
-
-/* Attribute will be determined after getting ModRM (for opcode groups) */
-static inline void insn_get_attribute(struct insn *insn)
-{
-       insn_get_modrm(insn);
-}
-
-/* Instruction uses RIP-relative addressing */
-extern int insn_rip_relative(struct insn *insn);
-
-/* Init insn for kernel text */
-static inline void kernel_insn_init(struct insn *insn,
-                                   const void *kaddr, int buf_len)
-{
-#ifdef CONFIG_X86_64
-       insn_init(insn, kaddr, buf_len, 1);
-#else /* CONFIG_X86_32 */
-       insn_init(insn, kaddr, buf_len, 0);
-#endif
-}
-
-static inline int insn_is_avx(struct insn *insn)
-{
-       if (!insn->prefixes.got)
-               insn_get_prefixes(insn);
-       return (insn->vex_prefix.value != 0);
-}
-
-static inline int insn_is_evex(struct insn *insn)
-{
-       if (!insn->prefixes.got)
-               insn_get_prefixes(insn);
-       return (insn->vex_prefix.nbytes == 4);
-}
-
-/* Ensure this instruction is decoded completely */
-static inline int insn_complete(struct insn *insn)
-{
-       return insn->opcode.got && insn->modrm.got && insn->sib.got &&
-               insn->displacement.got && insn->immediate.got;
-}
-
-static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
-{
-       if (insn->vex_prefix.nbytes == 2)       /* 2 bytes VEX */
-               return X86_VEX2_M;
-       else if (insn->vex_prefix.nbytes == 3)  /* 3 bytes VEX */
-               return X86_VEX3_M(insn->vex_prefix.bytes[1]);
-       else                                    /* EVEX */
-               return X86_EVEX_M(insn->vex_prefix.bytes[1]);
-}
-
-static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
-{
-       if (insn->vex_prefix.nbytes == 2)       /* 2 bytes VEX */
-               return X86_VEX_P(insn->vex_prefix.bytes[1]);
-       else
-               return X86_VEX_P(insn->vex_prefix.bytes[2]);
-}
-
-/* Get the last prefix id from last prefix or VEX prefix */
-static inline int insn_last_prefix_id(struct insn *insn)
-{
-       if (insn_is_avx(insn))
-               return insn_vex_p_bits(insn);   /* VEX_p is a SIMD prefix id */
-
-       if (insn->prefixes.bytes[3])
-               return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
-
-       return 0;
-}
-
-/* Offset of each field from kaddr */
-static inline int insn_offset_rex_prefix(struct insn *insn)
-{
-       return insn->prefixes.nbytes;
-}
-static inline int insn_offset_vex_prefix(struct insn *insn)
-{
-       return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
-}
-static inline int insn_offset_opcode(struct insn *insn)
-{
-       return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
-}
-static inline int insn_offset_modrm(struct insn *insn)
-{
-       return insn_offset_opcode(insn) + insn->opcode.nbytes;
-}
-static inline int insn_offset_sib(struct insn *insn)
-{
-       return insn_offset_modrm(insn) + insn->modrm.nbytes;
-}
-static inline int insn_offset_displacement(struct insn *insn)
-{
-       return insn_offset_sib(insn) + insn->sib.nbytes;
-}
-static inline int insn_offset_immediate(struct insn *insn)
-{
-       return insn_offset_displacement(insn) + insn->displacement.nbytes;
-}
-
-#endif /* _ASM_X86_INSN_H */
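
[Editor's note] Because the insn_offset_*() helpers above simply accumulate the nbytes of each preceding field, the raw bytes of any field can be located relative to kaddr once decoding has run. Another small sketch under the same assumptions (again illustrative only, not part of this commit):

    #include <stdio.h>
    #include "insn.h"

    int main(void)
    {
            /* 48 c7 c0 2a 00 00 00 = mov $0x2a,%rax */
            const unsigned char buf[] = { 0x48, 0xc7, 0xc0, 0x2a, 0x00, 0x00, 0x00 };
            struct insn insn;

            insn_init(&insn, buf, sizeof(buf), 1);
            insn_get_length(&insn);

            /* offset = prefixes + REX + VEX + opcode + ModRM + SIB + displacement */
            printf("immediate at offset %d, value %d\n",
                   insn_offset_immediate(&insn), insn.immediate.value);
            return 0;
    }
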
diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
deleted file mode 100644 (file)
index 12e3771..0000000
+++ /dev/null
@@ -1,1063 +0,0 @@
-# x86 Opcode Maps
-#
-# This is (mostly) based on the following documentation.
-# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
-#   (#326018-047US, June 2013)
-#
-#<Opcode maps>
-# Table: table-name
-# Referrer: escaped-name
-# AVXcode: avx-code
-# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
-# (or)
-# opcode: escape # escaped-name
-# EndTable
-#
-# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
-# mnemonics that begin with lowercase 'k' accept a VEX prefix
-#
-#<group maps>
-# GrpTable: GrpXXX
-# reg:  mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
-# EndTable
-#
-# AVX Superscripts
-#  (ev): this opcode requires EVEX prefix.
-#  (evo): this opcode is changed by EVEX prefix (EVEX opcode)
-#  (v): this opcode requires VEX prefix.
-#  (v1): this opcode only supports 128bit VEX.
-#
-# Last Prefix Superscripts
-#  - (66): the last prefix is 0x66
-#  - (F3): the last prefix is 0xF3
-#  - (F2): the last prefix is 0xF2
-#  - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
-#  - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
-
-Table: one byte opcode
-Referrer:
-AVXcode:
-# 0x00 - 0x0f
-00: ADD Eb,Gb
-01: ADD Ev,Gv
-02: ADD Gb,Eb
-03: ADD Gv,Ev
-04: ADD AL,Ib
-05: ADD rAX,Iz
-06: PUSH ES (i64)
-07: POP ES (i64)
-08: OR Eb,Gb
-09: OR Ev,Gv
-0a: OR Gb,Eb
-0b: OR Gv,Ev
-0c: OR AL,Ib
-0d: OR rAX,Iz
-0e: PUSH CS (i64)
-0f: escape # 2-byte escape
-# 0x10 - 0x1f
-10: ADC Eb,Gb
-11: ADC Ev,Gv
-12: ADC Gb,Eb
-13: ADC Gv,Ev
-14: ADC AL,Ib
-15: ADC rAX,Iz
-16: PUSH SS (i64)
-17: POP SS (i64)
-18: SBB Eb,Gb
-19: SBB Ev,Gv
-1a: SBB Gb,Eb
-1b: SBB Gv,Ev
-1c: SBB AL,Ib
-1d: SBB rAX,Iz
-1e: PUSH DS (i64)
-1f: POP DS (i64)
-# 0x20 - 0x2f
-20: AND Eb,Gb
-21: AND Ev,Gv
-22: AND Gb,Eb
-23: AND Gv,Ev
-24: AND AL,Ib
-25: AND rAx,Iz
-26: SEG=ES (Prefix)
-27: DAA (i64)
-28: SUB Eb,Gb
-29: SUB Ev,Gv
-2a: SUB Gb,Eb
-2b: SUB Gv,Ev
-2c: SUB AL,Ib
-2d: SUB rAX,Iz
-2e: SEG=CS (Prefix)
-2f: DAS (i64)
-# 0x30 - 0x3f
-30: XOR Eb,Gb
-31: XOR Ev,Gv
-32: XOR Gb,Eb
-33: XOR Gv,Ev
-34: XOR AL,Ib
-35: XOR rAX,Iz
-36: SEG=SS (Prefix)
-37: AAA (i64)
-38: CMP Eb,Gb
-39: CMP Ev,Gv
-3a: CMP Gb,Eb
-3b: CMP Gv,Ev
-3c: CMP AL,Ib
-3d: CMP rAX,Iz
-3e: SEG=DS (Prefix)
-3f: AAS (i64)
-# 0x40 - 0x4f
-40: INC eAX (i64) | REX (o64)
-41: INC eCX (i64) | REX.B (o64)
-42: INC eDX (i64) | REX.X (o64)
-43: INC eBX (i64) | REX.XB (o64)
-44: INC eSP (i64) | REX.R (o64)
-45: INC eBP (i64) | REX.RB (o64)
-46: INC eSI (i64) | REX.RX (o64)
-47: INC eDI (i64) | REX.RXB (o64)
-48: DEC eAX (i64) | REX.W (o64)
-49: DEC eCX (i64) | REX.WB (o64)
-4a: DEC eDX (i64) | REX.WX (o64)
-4b: DEC eBX (i64) | REX.WXB (o64)
-4c: DEC eSP (i64) | REX.WR (o64)
-4d: DEC eBP (i64) | REX.WRB (o64)
-4e: DEC eSI (i64) | REX.WRX (o64)
-4f: DEC eDI (i64) | REX.WRXB (o64)
-# 0x50 - 0x5f
-50: PUSH rAX/r8 (d64)
-51: PUSH rCX/r9 (d64)
-52: PUSH rDX/r10 (d64)
-53: PUSH rBX/r11 (d64)
-54: PUSH rSP/r12 (d64)
-55: PUSH rBP/r13 (d64)
-56: PUSH rSI/r14 (d64)
-57: PUSH rDI/r15 (d64)
-58: POP rAX/r8 (d64)
-59: POP rCX/r9 (d64)
-5a: POP rDX/r10 (d64)
-5b: POP rBX/r11 (d64)
-5c: POP rSP/r12 (d64)
-5d: POP rBP/r13 (d64)
-5e: POP rSI/r14 (d64)
-5f: POP rDI/r15 (d64)
-# 0x60 - 0x6f
-60: PUSHA/PUSHAD (i64)
-61: POPA/POPAD (i64)
-62: BOUND Gv,Ma (i64) | EVEX (Prefix)
-63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
-64: SEG=FS (Prefix)
-65: SEG=GS (Prefix)
-66: Operand-Size (Prefix)
-67: Address-Size (Prefix)
-68: PUSH Iz (d64)
-69: IMUL Gv,Ev,Iz
-6a: PUSH Ib (d64)
-6b: IMUL Gv,Ev,Ib
-6c: INS/INSB Yb,DX
-6d: INS/INSW/INSD Yz,DX
-6e: OUTS/OUTSB DX,Xb
-6f: OUTS/OUTSW/OUTSD DX,Xz
-# 0x70 - 0x7f
-70: JO Jb
-71: JNO Jb
-72: JB/JNAE/JC Jb
-73: JNB/JAE/JNC Jb
-74: JZ/JE Jb
-75: JNZ/JNE Jb
-76: JBE/JNA Jb
-77: JNBE/JA Jb
-78: JS Jb
-79: JNS Jb
-7a: JP/JPE Jb
-7b: JNP/JPO Jb
-7c: JL/JNGE Jb
-7d: JNL/JGE Jb
-7e: JLE/JNG Jb
-7f: JNLE/JG Jb
-# 0x80 - 0x8f
-80: Grp1 Eb,Ib (1A)
-81: Grp1 Ev,Iz (1A)
-82: Grp1 Eb,Ib (1A),(i64)
-83: Grp1 Ev,Ib (1A)
-84: TEST Eb,Gb
-85: TEST Ev,Gv
-86: XCHG Eb,Gb
-87: XCHG Ev,Gv
-88: MOV Eb,Gb
-89: MOV Ev,Gv
-8a: MOV Gb,Eb
-8b: MOV Gv,Ev
-8c: MOV Ev,Sw
-8d: LEA Gv,M
-8e: MOV Sw,Ew
-8f: Grp1A (1A) | POP Ev (d64)
-# 0x90 - 0x9f
-90: NOP | PAUSE (F3) | XCHG r8,rAX
-91: XCHG rCX/r9,rAX
-92: XCHG rDX/r10,rAX
-93: XCHG rBX/r11,rAX
-94: XCHG rSP/r12,rAX
-95: XCHG rBP/r13,rAX
-96: XCHG rSI/r14,rAX
-97: XCHG rDI/r15,rAX
-98: CBW/CWDE/CDQE
-99: CWD/CDQ/CQO
-9a: CALLF Ap (i64)
-9b: FWAIT/WAIT
-9c: PUSHF/D/Q Fv (d64)
-9d: POPF/D/Q Fv (d64)
-9e: SAHF
-9f: LAHF
-# 0xa0 - 0xaf
-a0: MOV AL,Ob
-a1: MOV rAX,Ov
-a2: MOV Ob,AL
-a3: MOV Ov,rAX
-a4: MOVS/B Yb,Xb
-a5: MOVS/W/D/Q Yv,Xv
-a6: CMPS/B Xb,Yb
-a7: CMPS/W/D Xv,Yv
-a8: TEST AL,Ib
-a9: TEST rAX,Iz
-aa: STOS/B Yb,AL
-ab: STOS/W/D/Q Yv,rAX
-ac: LODS/B AL,Xb
-ad: LODS/W/D/Q rAX,Xv
-ae: SCAS/B AL,Yb
-# Note: The May 2011 Intel manual shows Xv for the second parameter of the
-# next instruction but Yv is correct
-af: SCAS/W/D/Q rAX,Yv
-# 0xb0 - 0xbf
-b0: MOV AL/R8L,Ib
-b1: MOV CL/R9L,Ib
-b2: MOV DL/R10L,Ib
-b3: MOV BL/R11L,Ib
-b4: MOV AH/R12L,Ib
-b5: MOV CH/R13L,Ib
-b6: MOV DH/R14L,Ib
-b7: MOV BH/R15L,Ib
-b8: MOV rAX/r8,Iv
-b9: MOV rCX/r9,Iv
-ba: MOV rDX/r10,Iv
-bb: MOV rBX/r11,Iv
-bc: MOV rSP/r12,Iv
-bd: MOV rBP/r13,Iv
-be: MOV rSI/r14,Iv
-bf: MOV rDI/r15,Iv
-# 0xc0 - 0xcf
-c0: Grp2 Eb,Ib (1A)
-c1: Grp2 Ev,Ib (1A)
-c2: RETN Iw (f64)
-c3: RETN
-c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
-c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
-c6: Grp11A Eb,Ib (1A)
-c7: Grp11B Ev,Iz (1A)
-c8: ENTER Iw,Ib
-c9: LEAVE (d64)
-ca: RETF Iw
-cb: RETF
-cc: INT3
-cd: INT Ib
-ce: INTO (i64)
-cf: IRET/D/Q
-# 0xd0 - 0xdf
-d0: Grp2 Eb,1 (1A)
-d1: Grp2 Ev,1 (1A)
-d2: Grp2 Eb,CL (1A)
-d3: Grp2 Ev,CL (1A)
-d4: AAM Ib (i64)
-d5: AAD Ib (i64)
-d6:
-d7: XLAT/XLATB
-d8: ESC
-d9: ESC
-da: ESC
-db: ESC
-dc: ESC
-dd: ESC
-de: ESC
-df: ESC
-# 0xe0 - 0xef
-# Note: "forced64" is Intel CPU behavior: Intel CPUs ignore the 0x66 prefix
-# in 64-bit mode. AMD CPUs accept the 0x66 prefix; it causes RIP truncation
-# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64)
-e1: LOOPE/LOOPZ Jb (f64)
-e2: LOOP Jb (f64)
-e3: JrCXZ Jb (f64)
-e4: IN AL,Ib
-e5: IN eAX,Ib
-e6: OUT Ib,AL
-e7: OUT Ib,eAX
-# With the 0x66 prefix in 64-bit mode, the immediate offset in "near" jumps
-# and calls is 16-bit on AMD CPUs. For CALL, the pushed return address is
-# 16 bits wide and RSP is decremented by 2, but RSP is not truncated to
-# 16 bits, unlike RIP.
-e8: CALL Jz (f64)
-e9: JMP-near Jz (f64)
-ea: JMP-far Ap (i64)
-eb: JMP-short Jb (f64)
-ec: IN AL,DX
-ed: IN eAX,DX
-ee: OUT DX,AL
-ef: OUT DX,eAX
-# 0xf0 - 0xff
-f0: LOCK (Prefix)
-f1:
-f2: REPNE (Prefix) | XACQUIRE (Prefix)
-f3: REP/REPE (Prefix) | XRELEASE (Prefix)
-f4: HLT
-f5: CMC
-f6: Grp3_1 Eb (1A)
-f7: Grp3_2 Ev (1A)
-f8: CLC
-f9: STC
-fa: CLI
-fb: STI
-fc: CLD
-fd: STD
-fe: Grp4 (1A)
-ff: Grp5 (1A)
-EndTable
-
-Table: 2-byte opcode (0x0f)
-Referrer: 2-byte escape
-AVXcode: 1
-# 0x0f 0x00-0x0f
-00: Grp6 (1A)
-01: Grp7 (1A)
-02: LAR Gv,Ew
-03: LSL Gv,Ew
-04:
-05: SYSCALL (o64)
-06: CLTS
-07: SYSRET (o64)
-08: INVD
-09: WBINVD
-0a:
-0b: UD2 (1B)
-0c:
-# AMD's prefetch group. Intel supports prefetchw(/1) only.
-0d: GrpP
-0e: FEMMS
-# 3DNow! uses the last imm byte as opcode extension.
-0f: 3DNow! Pq,Qq,Ib
-# 0x0f 0x10-0x1f
-# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no
-# operands, but they actually do. Also, vmovss and vmovsd only accept 128-bit
-# operands. MOVSS/MOVSD has three forms in the SDM; this map just shows a
-# typical form. Many AVX instructions lack the v1 superscript, according to
-# the Intel AVX Programming Reference A.1.
-10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
-11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
-12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
-13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
-14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
-15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
-16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
-17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
-18: Grp16 (1A)
-19:
-# Intel SDM opcode map does not list MPX instructions. For now using Gv for
-# bnd registers and Ev for everything else is OK because the instruction
-# decoder does not use the information except as an indication that there is
-# a ModR/M byte.
-1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
-1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
-1d:
-1e:
-1f: NOP Ev
-# 0x0f 0x20-0x2f
-20: MOV Rd,Cd
-21: MOV Rd,Dd
-22: MOV Cd,Rd
-23: MOV Dd,Rd
-24:
-25:
-26:
-27:
-28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
-29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
-2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
-2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
-2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
-2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
-2e: vucomiss Vss,Wss (v1) | vucomisd  Vsd,Wsd (66),(v1)
-2f: vcomiss Vss,Wss (v1) | vcomisd  Vsd,Wsd (66),(v1)
-# 0x0f 0x30-0x3f
-30: WRMSR
-31: RDTSC
-32: RDMSR
-33: RDPMC
-34: SYSENTER
-35: SYSEXIT
-36:
-37: GETSEC
-38: escape # 3-byte escape 1
-39:
-3a: escape # 3-byte escape 2
-3b:
-3c:
-3d:
-3e:
-3f:
-# 0x0f 0x40-0x4f
-40: CMOVO Gv,Ev
-41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
-42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
-43: CMOVAE/NB/NC Gv,Ev
-44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
-45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
-46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
-47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
-48: CMOVS Gv,Ev
-49: CMOVNS Gv,Ev
-4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
-4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
-4c: CMOVL/NGE Gv,Ev
-4d: CMOVNL/GE Gv,Ev
-4e: CMOVLE/NG Gv,Ev
-4f: CMOVNLE/G Gv,Ev
-# 0x0f 0x50-0x5f
-50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
-51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
-52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
-53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
-54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
-55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
-56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
-57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
-58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
-59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
-5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
-5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
-5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
-5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
-5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
-5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
-# 0x0f 0x60-0x6f
-60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
-61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
-62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
-63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
-64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
-65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
-66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
-67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
-68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
-69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
-6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
-6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
-6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
-6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
-6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
-6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
-# 0x0f 0x70-0x7f
-70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
-71: Grp12 (1A)
-72: Grp13 (1A)
-73: Grp14 (1A)
-74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
-75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
-76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
-# Note: Remove (v), because vzeroall and vzeroupper become emms without VEX.
-77: emms | vzeroupper | vzeroall
-78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
-79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
-7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
-7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
-7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
-7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
-7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
-7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
-# 0x0f 0x80-0x8f
-# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64)
-81: JNO Jz (f64)
-82: JB/JC/JNAE Jz (f64)
-83: JAE/JNB/JNC Jz (f64)
-84: JE/JZ Jz (f64)
-85: JNE/JNZ Jz (f64)
-86: JBE/JNA Jz (f64)
-87: JA/JNBE Jz (f64)
-88: JS Jz (f64)
-89: JNS Jz (f64)
-8a: JP/JPE Jz (f64)
-8b: JNP/JPO Jz (f64)
-8c: JL/JNGE Jz (f64)
-8d: JNL/JGE Jz (f64)
-8e: JLE/JNG Jz (f64)
-8f: JNLE/JG Jz (f64)
-# 0x0f 0x90-0x9f
-90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
-91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
-92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
-93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
-94: SETE/Z Eb
-95: SETNE/NZ Eb
-96: SETBE/NA Eb
-97: SETA/NBE Eb
-98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
-99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
-9a: SETP/PE Eb
-9b: SETNP/PO Eb
-9c: SETL/NGE Eb
-9d: SETNL/GE Eb
-9e: SETLE/NG Eb
-9f: SETNLE/G Eb
-# 0x0f 0xa0-0xaf
-a0: PUSH FS (d64)
-a1: POP FS (d64)
-a2: CPUID
-a3: BT Ev,Gv
-a4: SHLD Ev,Gv,Ib
-a5: SHLD Ev,Gv,CL
-a6: GrpPDLK
-a7: GrpRNG
-a8: PUSH GS (d64)
-a9: POP GS (d64)
-aa: RSM
-ab: BTS Ev,Gv
-ac: SHRD Ev,Gv,Ib
-ad: SHRD Ev,Gv,CL
-ae: Grp15 (1A),(1C)
-af: IMUL Gv,Ev
-# 0x0f 0xb0-0xbf
-b0: CMPXCHG Eb,Gb
-b1: CMPXCHG Ev,Gv
-b2: LSS Gv,Mp
-b3: BTR Ev,Gv
-b4: LFS Gv,Mp
-b5: LGS Gv,Mp
-b6: MOVZX Gv,Eb
-b7: MOVZX Gv,Ew
-b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
-b9: Grp10 (1A)
-ba: Grp8 Ev,Ib (1A)
-bb: BTC Ev,Gv
-bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
-bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
-be: MOVSX Gv,Eb
-bf: MOVSX Gv,Ew
-# 0x0f 0xc0-0xcf
-c0: XADD Eb,Gb
-c1: XADD Ev,Gv
-c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
-c3: movnti My,Gy
-c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
-c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
-c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
-c7: Grp9 (1A)
-c8: BSWAP RAX/EAX/R8/R8D
-c9: BSWAP RCX/ECX/R9/R9D
-ca: BSWAP RDX/EDX/R10/R10D
-cb: BSWAP RBX/EBX/R11/R11D
-cc: BSWAP RSP/ESP/R12/R12D
-cd: BSWAP RBP/EBP/R13/R13D
-ce: BSWAP RSI/ESI/R14/R14D
-cf: BSWAP RDI/EDI/R15/R15D
-# 0x0f 0xd0-0xdf
-d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
-d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
-d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
-d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
-d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
-d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
-d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
-d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
-d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
-d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
-da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
-db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
-dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
-dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
-de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
-df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0xe0-0xef
-e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
-e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
-e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
-e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
-e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
-e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
-e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
-e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
-e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
-e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
-ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
-eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
-ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
-ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
-ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
-ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0xf0-0xff
-f0: vlddqu Vx,Mx (F2)
-f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
-f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
-f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
-f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
-f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
-f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
-f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
-f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
-f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
-fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
-fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
-fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
-fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
-fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
-EndTable
-
-Table: 3-byte opcode 1 (0x0f 0x38)
-Referrer: 3-byte escape 1
-AVXcode: 2
-# 0x0f 0x38 0x00-0x0f
-00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
-01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
-02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
-03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
-04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
-05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
-06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
-07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
-08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
-09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
-0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
-0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
-0c: vpermilps Vx,Hx,Wx (66),(v)
-0d: vpermilpd Vx,Hx,Wx (66),(v)
-0e: vtestps Vx,Wx (66),(v)
-0f: vtestpd Vx,Wx (66),(v)
-# 0x0f 0x38 0x10-0x1f
-10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
-11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
-12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
-13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
-14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
-15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
-16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
-17: vptest Vx,Wx (66)
-18: vbroadcastss Vx,Wd (66),(v)
-19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
-1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
-1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
-1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
-1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
-1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
-1f: vpabsq Vx,Wx (66),(ev)
-# 0x0f 0x38 0x20-0x2f
-20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
-21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
-22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
-23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
-24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
-25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
-26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
-27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
-28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
-29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
-2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
-2b: vpackusdw Vx,Hx,Wx (66),(v1)
-2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
-2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
-2e: vmaskmovps Mx,Hx,Vx (66),(v)
-2f: vmaskmovpd Mx,Hx,Vx (66),(v)
-# 0x0f 0x38 0x30-0x3f
-30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
-31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
-32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
-33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
-34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
-35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
-36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
-37: vpcmpgtq Vx,Hx,Wx (66),(v1)
-38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
-39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
-3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
-3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
-3c: vpmaxsb Vx,Hx,Wx (66),(v1)
-3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
-3e: vpmaxuw Vx,Hx,Wx (66),(v1)
-3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0x38 0x40-0x8f
-40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
-41: vphminposuw Vdq,Wdq (66),(v1)
-42: vgetexpps/d Vx,Wx (66),(ev)
-43: vgetexpss/d Vx,Hx,Wx (66),(ev)
-44: vplzcntd/q Vx,Wx (66),(ev)
-45: vpsrlvd/q Vx,Hx,Wx (66),(v)
-46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
-47: vpsllvd/q Vx,Hx,Wx (66),(v)
-# Skip 0x48-0x4b
-4c: vrcp14ps/d Vpd,Wpd (66),(ev)
-4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
-4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
-4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-# Skip 0x50-0x57
-58: vpbroadcastd Vx,Wx (66),(v)
-59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
-5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
-5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
-# Skip 0x5c-0x63
-64: vpblendmd/q Vx,Hx,Wx (66),(ev)
-65: vblendmps/d Vx,Hx,Wx (66),(ev)
-66: vpblendmb/w Vx,Hx,Wx (66),(ev)
-# Skip 0x67-0x74
-75: vpermi2b/w Vx,Hx,Wx (66),(ev)
-76: vpermi2d/q Vx,Hx,Wx (66),(ev)
-77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
-78: vpbroadcastb Vx,Wx (66),(v)
-79: vpbroadcastw Vx,Wx (66),(v)
-7a: vpbroadcastb Vx,Rv (66),(ev)
-7b: vpbroadcastw Vx,Rv (66),(ev)
-7c: vpbroadcastd/q Vx,Rv (66),(ev)
-7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
-7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
-7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
-80: INVEPT Gy,Mdq (66)
-81: INVVPID Gy,Mdq (66)
-82: INVPCID Gy,Mdq (66)
-83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
-88: vexpandps/d Vpd,Wpd (66),(ev)
-89: vpexpandd/q Vx,Wx (66),(ev)
-8a: vcompressps/d Wx,Vx (66),(ev)
-8b: vpcompressd/q Wx,Vx (66),(ev)
-8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
-8d: vpermb/w Vx,Hx,Wx (66),(ev)
-8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
-# 0x0f 0x38 0x90-0xbf (FMA)
-90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
-91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
-92: vgatherdps/d Vx,Hx,Wx (66),(v)
-93: vgatherqps/d Vx,Hx,Wx (66),(v)
-94:
-95:
-96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
-97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
-98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
-99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
-9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
-9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
-9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
-9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
-9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
-9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
-a0: vpscatterdd/q Wx,Vx (66),(ev)
-a1: vpscatterqd/q Wx,Vx (66),(ev)
-a2: vscatterdps/d Wx,Vx (66),(ev)
-a3: vscatterqps/d Wx,Vx (66),(ev)
-a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
-a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
-a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
-a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
-aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
-ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
-ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
-ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
-ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
-af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
-b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
-b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
-b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
-b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
-b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
-b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
-ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
-bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
-bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
-bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
-be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
-bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
-# 0x0f 0x38 0xc0-0xff
-c4: vpconflictd/q Vx,Wx (66),(ev)
-c6: Grp18 (1A)
-c7: Grp19 (1A)
-c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
-c9: sha1msg1 Vdq,Wdq
-ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
-cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
-cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
-cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
-db: VAESIMC Vdq,Wdq (66),(v1)
-dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
-dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
-de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
-df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
-f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
-f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
-f2: ANDN Gy,By,Ey (v)
-f3: Grp17 (1A)
-f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
-f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
-f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
-EndTable
-
-Table: 3-byte opcode 2 (0x0f 0x3a)
-Referrer: 3-byte escape 2
-AVXcode: 3
-# 0x0f 0x3a 0x00-0xff
-00: vpermq Vqq,Wqq,Ib (66),(v)
-01: vpermpd Vqq,Wqq,Ib (66),(v)
-02: vpblendd Vx,Hx,Wx,Ib (66),(v)
-03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
-04: vpermilps Vx,Wx,Ib (66),(v)
-05: vpermilpd Vx,Wx,Ib (66),(v)
-06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
-07:
-08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
-09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
-0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
-0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
-0c: vblendps Vx,Hx,Wx,Ib (66)
-0d: vblendpd Vx,Hx,Wx,Ib (66)
-0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
-0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
-14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
-15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
-16: vpextrd/q Ey,Vdq,Ib (66),(v1)
-17: vextractps Ed,Vdq,Ib (66),(v1)
-18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
-19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
-1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
-1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
-1d: vcvtps2ph Wx,Vx,Ib (66),(v)
-1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
-1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
-20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
-21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
-22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
-23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
-25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
-26: vgetmantps/d Vx,Wx,Ib (66),(ev)
-27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
-30: kshiftrb/w Vk,Uk,Ib (66),(v)
-31: kshiftrd/q Vk,Uk,Ib (66),(v)
-32: kshiftlb/w Vk,Uk,Ib (66),(v)
-33: kshiftld/q Vk,Uk,Ib (66),(v)
-38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
-39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
-3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
-3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
-3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
-3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
-40: vdpps Vx,Hx,Wx,Ib (66)
-41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
-42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
-43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
-44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
-46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
-4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
-4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
-4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
-50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
-51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
-54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
-55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
-56: vreduceps/d Vx,Wx,Ib (66),(ev)
-57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
-60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
-61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
-62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
-63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
-66: vfpclassps/d Vk,Wx,Ib (66),(ev)
-67: vfpclassss/d Vk,Wx,Ib (66),(ev)
-cc: sha1rnds4 Vdq,Wdq,Ib
-df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
-f0: RORX Gy,Ey,Ib (F2),(v)
-EndTable
-
-GrpTable: Grp1
-0: ADD
-1: OR
-2: ADC
-3: SBB
-4: AND
-5: SUB
-6: XOR
-7: CMP
-EndTable
-
-GrpTable: Grp1A
-0: POP
-EndTable
-
-GrpTable: Grp2
-0: ROL
-1: ROR
-2: RCL
-3: RCR
-4: SHL/SAL
-5: SHR
-6:
-7: SAR
-EndTable
-
-GrpTable: Grp3_1
-0: TEST Eb,Ib
-1:
-2: NOT Eb
-3: NEG Eb
-4: MUL AL,Eb
-5: IMUL AL,Eb
-6: DIV AL,Eb
-7: IDIV AL,Eb
-EndTable
-
-GrpTable: Grp3_2
-0: TEST Ev,Iz
-1:
-2: NOT Ev
-3: NEG Ev
-4: MUL rAX,Ev
-5: IMUL rAX,Ev
-6: DIV rAX,Ev
-7: IDIV rAX,Ev
-EndTable
-
-GrpTable: Grp4
-0: INC Eb
-1: DEC Eb
-EndTable
-
-GrpTable: Grp5
-0: INC Ev
-1: DEC Ev
-# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-2: CALLN Ev (f64)
-3: CALLF Ep
-4: JMPN Ev (f64)
-5: JMPF Mp
-6: PUSH Ev (d64)
-7:
-EndTable
-
-GrpTable: Grp6
-0: SLDT Rv/Mw
-1: STR Rv/Mw
-2: LLDT Ew
-3: LTR Ew
-4: VERR Ew
-5: VERW Ew
-EndTable
-
-GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
-3: LIDT Ms
-4: SMSW Mw/Rv
-5: rdpkru (110),(11B) | wrpkru (111),(11B)
-6: LMSW Ew
-7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
-EndTable
-
-GrpTable: Grp8
-4: BT
-5: BTS
-6: BTR
-7: BTC
-EndTable
-
-GrpTable: Grp9
-1: CMPXCHG8B/16B Mq/Mdq
-3: xrstors
-4: xsavec
-5: xsaves
-6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
-7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
-EndTable
-
-GrpTable: Grp10
-EndTable
-
-# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
-GrpTable: Grp11A
-0: MOV Eb,Ib
-7: XABORT Ib (000),(11B)
-EndTable
-
-GrpTable: Grp11B
-0: MOV Eb,Iz
-7: XBEGIN Jz (000),(11B)
-EndTable
-
-GrpTable: Grp12
-2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
-4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
-6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp13
-0: vprord/q Hx,Wx,Ib (66),(ev)
-1: vprold/q Hx,Wx,Ib (66),(ev)
-2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
-4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
-6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp14
-2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
-3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
-6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
-7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp15
-0: fxsave | RDFSBASE Ry (F3),(11B)
-1: fxstor | RDGSBASE Ry (F3),(11B)
-2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
-3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
-4: XSAVE | ptwrite Ey (F3),(11B)
-5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
-7: clflush | clflushopt (66) | sfence (11B)
-EndTable
-
-GrpTable: Grp16
-0: prefetch NTA
-1: prefetch T0
-2: prefetch T1
-3: prefetch T2
-EndTable
-
-GrpTable: Grp17
-1: BLSR By,Ey (v)
-2: BLSMSK By,Ey (v)
-3: BLSI By,Ey (v)
-EndTable
-
-GrpTable: Grp18
-1: vgatherpf0dps/d Wx (66),(ev)
-2: vgatherpf1dps/d Wx (66),(ev)
-5: vscatterpf0dps/d Wx (66),(ev)
-6: vscatterpf1dps/d Wx (66),(ev)
-EndTable
-
-GrpTable: Grp19
-1: vgatherpf0qps/d Wx (66),(ev)
-2: vgatherpf1qps/d Wx (66),(ev)
-5: vscatterpf0qps/d Wx (66),(ev)
-6: vscatterpf1qps/d Wx (66),(ev)
-EndTable
-
-# AMD's Prefetch Group
-GrpTable: GrpP
-0: PREFETCH
-1: PREFETCHW
-EndTable
-
-GrpTable: GrpPDLK
-0: MONTMUL
-1: XSHA1
-2: XSHA2
-EndTable
-
-GrpTable: GrpRNG
-0: xstore-rng
-1: xcrypt-ecb
-2: xcrypt-cbc
-4: xcrypt-cfb
-5: xcrypt-ofb
-EndTable
diff --git a/tools/objtool/arch/x86/lib/inat.c b/tools/objtool/arch/x86/lib/inat.c
new file mode 100644 (file)
index 0000000..c1f01a8
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * x86 instruction attribute tables
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <asm/insn.h>
+
+/* Attribute tables are generated from opcode map */
+#include "inat-tables.c"
+
+/* Attribute search APIs */
+insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
+{
+       return inat_primary_table[opcode];
+}
+
+int inat_get_last_prefix_id(insn_byte_t last_pfx)
+{
+       insn_attr_t lpfx_attr;
+
+       lpfx_attr = inat_get_opcode_attribute(last_pfx);
+       return inat_last_prefix_id(lpfx_attr);
+}
+
+insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
+                                     insn_attr_t esc_attr)
+{
+       const insn_attr_t *table;
+       int n;
+
+       n = inat_escape_id(esc_attr);
+
+       table = inat_escape_tables[n][0];
+       if (!table)
+               return 0;
+       if (inat_has_variant(table[opcode]) && lpfx_id) {
+               table = inat_escape_tables[n][lpfx_id];
+               if (!table)
+                       return 0;
+       }
+       return table[opcode];
+}
+
+insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
+                                    insn_attr_t grp_attr)
+{
+       const insn_attr_t *table;
+       int n;
+
+       n = inat_group_id(grp_attr);
+
+       table = inat_group_tables[n][0];
+       if (!table)
+               return inat_group_common_attribute(grp_attr);
+       if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
+               table = inat_group_tables[n][lpfx_id];
+               if (!table)
+                       return inat_group_common_attribute(grp_attr);
+       }
+       return table[X86_MODRM_REG(modrm)] |
+              inat_group_common_attribute(grp_attr);
+}
+
+insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
+                                  insn_byte_t vex_p)
+{
+       const insn_attr_t *table;
+       if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
+               return 0;
+       /* At first, this checks the master table */
+       table = inat_avx_tables[vex_m][0];
+       if (!table)
+               return 0;
+       if (!inat_is_group(table[opcode]) && vex_p) {
+               /* If this is not a group, get attribute directly */
+               table = inat_avx_tables[vex_m][vex_p];
+               if (!table)
+                       return 0;
+       }
+       return table[opcode];
+}
+
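The file above only provides thin lookups into the generated attribute tables. A minimal usage sketch (an editor's illustration, not part of this commit; it assumes the companion <asm/inat.h> macros inat_is_legacy_prefix() and inat_has_modrm() that the decoder added below also relies on):

#include <asm/inat.h>
#include <asm/insn.h>

/* Return 1 if the single opcode byte @op takes a ModRM byte, 0 if it does
 * not, or -1 if @op is really a legacy prefix and must be skipped first. */
static int opcode_has_modrm(insn_byte_t op)
{
        insn_attr_t attr = inat_get_opcode_attribute(op);

        if (inat_is_legacy_prefix(attr))
                return -1;
        return inat_has_modrm(attr) ? 1 : 0;
}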
diff --git a/tools/objtool/arch/x86/lib/insn.c b/tools/objtool/arch/x86/lib/insn.c
new file mode 100644 (file)
index 0000000..1088eb8
--- /dev/null
@@ -0,0 +1,606 @@
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004, 2009
+ */
+
+#ifdef __KERNEL__
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif
+#include <asm/inat.h>
+#include <asm/insn.h>
+
+/* Verify that the next sizeof(t) + n bytes are within the instruction buffer */
+#define validate_next(t, insn, n)      \
+       ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
+
+#define __get_next(t, insn)    \
+       ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+
+#define __peek_nbyte_next(t, insn, n)  \
+       ({ t r = *(t*)((insn)->next_byte + n); r; })
+
+#define get_next(t, insn)      \
+       ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
+
+#define peek_nbyte_next(t, insn, n)    \
+       ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
+
+#define peek_next(t, insn)     peek_nbyte_next(t, insn, 0)
+
+/**
+ * insn_init() - initialize struct insn
+ * @insn:      &struct insn to be initialized
+ * @kaddr:     address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len:   length of the buffer at @kaddr
+ * @x86_64:    !0 for 64-bit kernel or 64-bit app
+ */
+void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+{
+       /*
+        * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
+        * even if the input buffer is long enough to hold them.
+        */
+       if (buf_len > MAX_INSN_SIZE)
+               buf_len = MAX_INSN_SIZE;
+
+       memset(insn, 0, sizeof(*insn));
+       insn->kaddr = kaddr;
+       insn->end_kaddr = kaddr + buf_len;
+       insn->next_byte = kaddr;
+       insn->x86_64 = x86_64 ? 1 : 0;
+       insn->opnd_bytes = 4;
+       if (x86_64)
+               insn->addr_bytes = 8;
+       else
+               insn->addr_bytes = 4;
+}
+
+/**
+ * insn_get_prefixes - scan x86 instruction prefix bytes
+ * @insn:      &struct insn containing instruction
+ *
+ * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
+ * to point to the (first) opcode.  No effect if @insn->prefixes.got
+ * is already set.
+ */
+void insn_get_prefixes(struct insn *insn)
+{
+       struct insn_field *prefixes = &insn->prefixes;
+       insn_attr_t attr;
+       insn_byte_t b, lb;
+       int i, nb;
+
+       if (prefixes->got)
+               return;
+
+       nb = 0;
+       lb = 0;
+       b = peek_next(insn_byte_t, insn);
+       attr = inat_get_opcode_attribute(b);
+       while (inat_is_legacy_prefix(attr)) {
+               /* Skip if same prefix */
+               for (i = 0; i < nb; i++)
+                       if (prefixes->bytes[i] == b)
+                               goto found;
+               if (nb == 4)
+                       /* Invalid instruction */
+                       break;
+               prefixes->bytes[nb++] = b;
+               if (inat_is_address_size_prefix(attr)) {
+                       /* address size switches 2/4 or 4/8 */
+                       if (insn->x86_64)
+                               insn->addr_bytes ^= 12;
+                       else
+                               insn->addr_bytes ^= 6;
+               } else if (inat_is_operand_size_prefix(attr)) {
+                       /* operand size switches 2/4 */
+                       insn->opnd_bytes ^= 6;
+               }
+found:
+               prefixes->nbytes++;
+               insn->next_byte++;
+               lb = b;
+               b = peek_next(insn_byte_t, insn);
+               attr = inat_get_opcode_attribute(b);
+       }
+       /* Set the last prefix */
+       if (lb && lb != insn->prefixes.bytes[3]) {
+               if (unlikely(insn->prefixes.bytes[3])) {
+                       /* Swap the last prefix */
+                       b = insn->prefixes.bytes[3];
+                       for (i = 0; i < nb; i++)
+                               if (prefixes->bytes[i] == lb)
+                                       prefixes->bytes[i] = b;
+               }
+               insn->prefixes.bytes[3] = lb;
+       }
+
+       /* Decode REX prefix */
+       if (insn->x86_64) {
+               b = peek_next(insn_byte_t, insn);
+               attr = inat_get_opcode_attribute(b);
+               if (inat_is_rex_prefix(attr)) {
+                       insn->rex_prefix.value = b;
+                       insn->rex_prefix.nbytes = 1;
+                       insn->next_byte++;
+                       if (X86_REX_W(b))
+                               /* REX.W overrides opnd_size */
+                               insn->opnd_bytes = 8;
+               }
+       }
+       insn->rex_prefix.got = 1;
+
+       /* Decode VEX prefix */
+       b = peek_next(insn_byte_t, insn);
+       attr = inat_get_opcode_attribute(b);
+       if (inat_is_vex_prefix(attr)) {
+               insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
+               if (!insn->x86_64) {
+                       /*
+                        * In 32-bit mode, if the [7:6] bits (mod bits of
+                        * ModRM) on the second byte are not 11b, it is
+                        * LDS or LES or BOUND.
+                        */
+                       if (X86_MODRM_MOD(b2) != 3)
+                               goto vex_end;
+               }
+               insn->vex_prefix.bytes[0] = b;
+               insn->vex_prefix.bytes[1] = b2;
+               if (inat_is_evex_prefix(attr)) {
+                       b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+                       insn->vex_prefix.bytes[2] = b2;
+                       b2 = peek_nbyte_next(insn_byte_t, insn, 3);
+                       insn->vex_prefix.bytes[3] = b2;
+                       insn->vex_prefix.nbytes = 4;
+                       insn->next_byte += 4;
+                       if (insn->x86_64 && X86_VEX_W(b2))
+                               /* VEX.W overrides opnd_size */
+                               insn->opnd_bytes = 8;
+               } else if (inat_is_vex3_prefix(attr)) {
+                       b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+                       insn->vex_prefix.bytes[2] = b2;
+                       insn->vex_prefix.nbytes = 3;
+                       insn->next_byte += 3;
+                       if (insn->x86_64 && X86_VEX_W(b2))
+                               /* VEX.W overrides opnd_size */
+                               insn->opnd_bytes = 8;
+               } else {
+                       /*
+                        * For VEX2, fake VEX3-like byte#2.
+                        * Makes it easier to decode vex.W, vex.vvvv,
+                        * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
+                        */
+                       insn->vex_prefix.bytes[2] = b2 & 0x7f;
+                       insn->vex_prefix.nbytes = 2;
+                       insn->next_byte += 2;
+               }
+       }
+vex_end:
+       insn->vex_prefix.got = 1;
+
+       prefixes->got = 1;
+
+err_out:
+       return;
+}
+
+/**
+ * insn_get_opcode - collect opcode(s)
+ * @insn:      &struct insn containing instruction
+ *
+ * Populates @insn->opcode, updates @insn->next_byte to point past the
+ * opcode byte(s), and sets @insn->attr (except for groups).
+ * If necessary, first collects any preceding (prefix) bytes.
+ * Sets @insn->opcode.value = opcode1.  No effect if @insn->opcode.got
+ * is already 1.
+ */
+void insn_get_opcode(struct insn *insn)
+{
+       struct insn_field *opcode = &insn->opcode;
+       insn_byte_t op;
+       int pfx_id;
+       if (opcode->got)
+               return;
+       if (!insn->prefixes.got)
+               insn_get_prefixes(insn);
+
+       /* Get first opcode */
+       op = get_next(insn_byte_t, insn);
+       opcode->bytes[0] = op;
+       opcode->nbytes = 1;
+
+       /* Check if there is VEX prefix or not */
+       if (insn_is_avx(insn)) {
+               insn_byte_t m, p;
+               m = insn_vex_m_bits(insn);
+               p = insn_vex_p_bits(insn);
+               insn->attr = inat_get_avx_attribute(op, m, p);
+               if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
+                   (!inat_accept_vex(insn->attr) &&
+                    !inat_is_group(insn->attr)))
+                       insn->attr = 0; /* This instruction is bad */
+               goto end;       /* VEX has only 1 byte for opcode */
+       }
+
+       insn->attr = inat_get_opcode_attribute(op);
+       while (inat_is_escape(insn->attr)) {
+               /* Get escaped opcode */
+               op = get_next(insn_byte_t, insn);
+               opcode->bytes[opcode->nbytes++] = op;
+               pfx_id = insn_last_prefix_id(insn);
+               insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
+       }
+       if (inat_must_vex(insn->attr))
+               insn->attr = 0; /* This instruction is bad */
+end:
+       opcode->got = 1;
+
+err_out:
+       return;
+}
+
+/**
+ * insn_get_modrm - collect ModRM byte, if any
+ * @insn:      &struct insn containing instruction
+ *
+ * Populates @insn->modrm and updates @insn->next_byte to point past the
+ * ModRM byte, if any.  If necessary, first collects the preceding bytes
+ * (prefixes and opcode(s)).  No effect if @insn->modrm.got is already 1.
+ */
+void insn_get_modrm(struct insn *insn)
+{
+       struct insn_field *modrm = &insn->modrm;
+       insn_byte_t pfx_id, mod;
+       if (modrm->got)
+               return;
+       if (!insn->opcode.got)
+               insn_get_opcode(insn);
+
+       if (inat_has_modrm(insn->attr)) {
+               mod = get_next(insn_byte_t, insn);
+               modrm->value = mod;
+               modrm->nbytes = 1;
+               if (inat_is_group(insn->attr)) {
+                       pfx_id = insn_last_prefix_id(insn);
+                       insn->attr = inat_get_group_attribute(mod, pfx_id,
+                                                             insn->attr);
+                       if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
+                               insn->attr = 0; /* This is bad */
+               }
+       }
+
+       if (insn->x86_64 && inat_is_force64(insn->attr))
+               insn->opnd_bytes = 8;
+       modrm->got = 1;
+
+err_out:
+       return;
+}
+
+
+/**
+ * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
+ * @insn:      &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * ModRM byte.  No effect if @insn->x86_64 is 0.
+ */
+int insn_rip_relative(struct insn *insn)
+{
+       struct insn_field *modrm = &insn->modrm;
+
+       if (!insn->x86_64)
+               return 0;
+       if (!modrm->got)
+               insn_get_modrm(insn);
+       /*
+        * For rip-relative instructions, the mod field (top 2 bits)
+        * is zero and the r/m field (bottom 3 bits) is 0x5.
+        */
+       return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
+}
+
+/**
+ * insn_get_sib() - Get the SIB byte of instruction
+ * @insn:      &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * ModRM byte.
+ */
+void insn_get_sib(struct insn *insn)
+{
+       insn_byte_t modrm;
+
+       if (insn->sib.got)
+               return;
+       if (!insn->modrm.got)
+               insn_get_modrm(insn);
+       if (insn->modrm.nbytes) {
+               modrm = (insn_byte_t)insn->modrm.value;
+               if (insn->addr_bytes != 2 &&
+                   X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
+                       insn->sib.value = get_next(insn_byte_t, insn);
+                       insn->sib.nbytes = 1;
+               }
+       }
+       insn->sib.got = 1;
+
+err_out:
+       return;
+}
+
+
+/**
+ * insn_get_displacement() - Get the displacement of instruction
+ * @insn:      &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * SIB byte.
+ * The displacement value is sign-extended.
+ */
+void insn_get_displacement(struct insn *insn)
+{
+       insn_byte_t mod, rm, base;
+
+       if (insn->displacement.got)
+               return;
+       if (!insn->sib.got)
+               insn_get_sib(insn);
+       if (insn->modrm.nbytes) {
+               /*
+                * Interpreting the modrm byte:
+                * mod = 00 - no displacement fields (exceptions below)
+                * mod = 01 - 1-byte displacement field
+                * mod = 10 - displacement field is 4 bytes, or 2 bytes if
+                *      address size = 2 (0x67 prefix in 32-bit mode)
+                * mod = 11 - no memory operand
+                *
+                * If address size = 2...
+                * mod = 00, r/m = 110 - displacement field is 2 bytes
+                *
+                * If address size != 2...
+                * mod != 11, r/m = 100 - SIB byte exists
+                * mod = 00, SIB base = 101 - displacement field is 4 bytes
+                * mod = 00, r/m = 101 - rip-relative addressing, displacement
+                *      field is 4 bytes
+                */
+               mod = X86_MODRM_MOD(insn->modrm.value);
+               rm = X86_MODRM_RM(insn->modrm.value);
+               base = X86_SIB_BASE(insn->sib.value);
+               if (mod == 3)
+                       goto out;
+               if (mod == 1) {
+                       insn->displacement.value = get_next(signed char, insn);
+                       insn->displacement.nbytes = 1;
+               } else if (insn->addr_bytes == 2) {
+                       if ((mod == 0 && rm == 6) || mod == 2) {
+                               insn->displacement.value =
+                                        get_next(short, insn);
+                               insn->displacement.nbytes = 2;
+                       }
+               } else {
+                       if ((mod == 0 && rm == 5) || mod == 2 ||
+                           (mod == 0 && base == 5)) {
+                               insn->displacement.value = get_next(int, insn);
+                               insn->displacement.nbytes = 4;
+                       }
+               }
+       }
+out:
+       insn->displacement.got = 1;
+
+err_out:
+       return;
+}
+
+/* Decode moffset16/32/64. Return 0 if failed */
+static int __get_moffset(struct insn *insn)
+{
+       switch (insn->addr_bytes) {
+       case 2:
+               insn->moffset1.value = get_next(short, insn);
+               insn->moffset1.nbytes = 2;
+               break;
+       case 4:
+               insn->moffset1.value = get_next(int, insn);
+               insn->moffset1.nbytes = 4;
+               break;
+       case 8:
+               insn->moffset1.value = get_next(int, insn);
+               insn->moffset1.nbytes = 4;
+               insn->moffset2.value = get_next(int, insn);
+               insn->moffset2.nbytes = 4;
+               break;
+       default:        /* addr_bytes must be modified manually */
+               goto err_out;
+       }
+       insn->moffset1.got = insn->moffset2.got = 1;
+
+       return 1;
+
+err_out:
+       return 0;
+}
+
+/* Decode imm v32(Iz). Return 0 if failed */
+static int __get_immv32(struct insn *insn)
+{
+       switch (insn->opnd_bytes) {
+       case 2:
+               insn->immediate.value = get_next(short, insn);
+               insn->immediate.nbytes = 2;
+               break;
+       case 4:
+       case 8:
+               insn->immediate.value = get_next(int, insn);
+               insn->immediate.nbytes = 4;
+               break;
+       default:        /* opnd_bytes must be modified manually */
+               goto err_out;
+       }
+
+       return 1;
+
+err_out:
+       return 0;
+}
+
+/* Decode imm v64(Iv/Ov). Return 0 if failed */
+static int __get_immv(struct insn *insn)
+{
+       switch (insn->opnd_bytes) {
+       case 2:
+               insn->immediate1.value = get_next(short, insn);
+               insn->immediate1.nbytes = 2;
+               break;
+       case 4:
+               insn->immediate1.value = get_next(int, insn);
+               insn->immediate1.nbytes = 4;
+               break;
+       case 8:
+               insn->immediate1.value = get_next(int, insn);
+               insn->immediate1.nbytes = 4;
+               insn->immediate2.value = get_next(int, insn);
+               insn->immediate2.nbytes = 4;
+               break;
+       default:        /* opnd_bytes must be modified manually */
+               goto err_out;
+       }
+       insn->immediate1.got = insn->immediate2.got = 1;
+
+       return 1;
+err_out:
+       return 0;
+}
+
+/* Decode ptr16:16/32(Ap) */
+static int __get_immptr(struct insn *insn)
+{
+       switch (insn->opnd_bytes) {
+       case 2:
+               insn->immediate1.value = get_next(short, insn);
+               insn->immediate1.nbytes = 2;
+               break;
+       case 4:
+               insn->immediate1.value = get_next(int, insn);
+               insn->immediate1.nbytes = 4;
+               break;
+       case 8:
+               /* ptr16:64 does not exist (no segment) */
+               return 0;
+       default:        /* opnd_bytes must be modified manually */
+               goto err_out;
+       }
+       insn->immediate2.value = get_next(unsigned short, insn);
+       insn->immediate2.nbytes = 2;
+       insn->immediate1.got = insn->immediate2.got = 1;
+
+       return 1;
+err_out:
+       return 0;
+}
+
+/**
+ * insn_get_immediate() - Get the immediates of instruction
+ * @insn:      &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * displacement bytes.
+ * Most immediates are sign-extended. The unsigned value can be obtained
+ * by masking with ((1 << (nbytes * 8)) - 1).
+ */
+void insn_get_immediate(struct insn *insn)
+{
+       if (insn->immediate.got)
+               return;
+       if (!insn->displacement.got)
+               insn_get_displacement(insn);
+
+       if (inat_has_moffset(insn->attr)) {
+               if (!__get_moffset(insn))
+                       goto err_out;
+               goto done;
+       }
+
+       if (!inat_has_immediate(insn->attr))
+               /* no immediates */
+               goto done;
+
+       switch (inat_immediate_size(insn->attr)) {
+       case INAT_IMM_BYTE:
+               insn->immediate.value = get_next(signed char, insn);
+               insn->immediate.nbytes = 1;
+               break;
+       case INAT_IMM_WORD:
+               insn->immediate.value = get_next(short, insn);
+               insn->immediate.nbytes = 2;
+               break;
+       case INAT_IMM_DWORD:
+               insn->immediate.value = get_next(int, insn);
+               insn->immediate.nbytes = 4;
+               break;
+       case INAT_IMM_QWORD:
+               insn->immediate1.value = get_next(int, insn);
+               insn->immediate1.nbytes = 4;
+               insn->immediate2.value = get_next(int, insn);
+               insn->immediate2.nbytes = 4;
+               break;
+       case INAT_IMM_PTR:
+               if (!__get_immptr(insn))
+                       goto err_out;
+               break;
+       case INAT_IMM_VWORD32:
+               if (!__get_immv32(insn))
+                       goto err_out;
+               break;
+       case INAT_IMM_VWORD:
+               if (!__get_immv(insn))
+                       goto err_out;
+               break;
+       default:
+               /* Here, insn must have an immediate, but failed */
+               goto err_out;
+       }
+       if (inat_has_second_immediate(insn->attr)) {
+               insn->immediate2.value = get_next(signed char, insn);
+               insn->immediate2.nbytes = 1;
+       }
+done:
+       insn->immediate.got = 1;
+
+err_out:
+       return;
+}
+
+/**
+ * insn_get_length() - Get the length of instruction
+ * @insn:      &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * immediate bytes.
+ */
+void insn_get_length(struct insn *insn)
+{
+       if (insn->length)
+               return;
+       if (!insn->immediate.got)
+               insn_get_immediate(insn);
+       insn->length = (unsigned char)((unsigned long)insn->next_byte
+                                    - (unsigned long)insn->kaddr);
+}
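As a rough usage sketch of the decoder above (an editor's illustration, not part of this commit; it assumes the tools copy of <asm/insn.h> is on the include path), one instruction is decoded by initializing a struct insn over a byte buffer and then asking for its length, which pulls in prefixes, opcode, ModRM, SIB, displacement and immediates:

#include <stdio.h>
#include <asm/insn.h>

/* Decode one instruction from @buf (at most @len bytes) and print a few
 * decoded fields.  @x86_64 selects 64-bit vs. 32-bit decoding. */
static void dump_one_insn(const unsigned char *buf, int len, int x86_64)
{
        struct insn insn;

        insn_init(&insn, buf, len, x86_64);
        insn_get_length(&insn);         /* collects prefixes ... immediates */

        if (!insn.opcode.got) {
                printf("decode failed\n");
                return;
        }
        printf("length=%d opcode[0]=0x%02x modrm=%d byte(s) imm=%d byte(s)\n",
               insn.length, insn.opcode.bytes[0],
               insn.modrm.nbytes, insn.immediate.nbytes);
}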
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
new file mode 100644 (file)
index 0000000..12e3771
--- /dev/null
@@ -0,0 +1,1063 @@
+# x86 Opcode Maps
+#
+# This is (mostly) based on the following documentation.
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
+#   (#326018-047US, June 2013)
+#
+#<Opcode maps>
+# Table: table-name
+# Referrer: escaped-name
+# AVXcode: avx-code
+# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...]] [| 2nd-mnemonic ...]
+# (or)
+# opcode: escape # escaped-name
+# EndTable
+#
+# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
+# mnemonics that begin with lowercase 'k' accept a VEX prefix
+#
+#<group maps>
+# GrpTable: GrpXXX
+# reg:  mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...]] [| 2nd-mnemonic ...]
+# EndTable
+#
+# AVX Superscripts
+#  (ev): this opcode requires EVEX prefix.
+#  (evo): this opcode is changed by EVEX prefix (EVEX opcode)
+#  (v): this opcode requires VEX prefix.
+#  (v1): this opcode only supports 128-bit VEX.
+#
+# Last Prefix Superscripts
+#  - (66): the last prefix is 0x66
+#  - (F3): the last prefix is 0xF3
+#  - (F2): the last prefix is 0xF2
+#  - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
+#  - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
+
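# Editor's note (illustration only, not part of the map): as a worked reading
# of the syntax above, the two-byte entry
#   10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
# which appears later in this file lists four variants of opcode 0x0f 0x10,
# selected by the last prefix: none -> vmovups, 0x66 -> vmovupd, 0xF3 -> vmovss,
# 0xF2 -> vmovsd; the (v1) superscript marks the scalar forms as 128-bit-VEX only.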
+Table: one byte opcode
+Referrer:
+AVXcode:
+# 0x00 - 0x0f
+00: ADD Eb,Gb
+01: ADD Ev,Gv
+02: ADD Gb,Eb
+03: ADD Gv,Ev
+04: ADD AL,Ib
+05: ADD rAX,Iz
+06: PUSH ES (i64)
+07: POP ES (i64)
+08: OR Eb,Gb
+09: OR Ev,Gv
+0a: OR Gb,Eb
+0b: OR Gv,Ev
+0c: OR AL,Ib
+0d: OR rAX,Iz
+0e: PUSH CS (i64)
+0f: escape # 2-byte escape
+# 0x10 - 0x1f
+10: ADC Eb,Gb
+11: ADC Ev,Gv
+12: ADC Gb,Eb
+13: ADC Gv,Ev
+14: ADC AL,Ib
+15: ADC rAX,Iz
+16: PUSH SS (i64)
+17: POP SS (i64)
+18: SBB Eb,Gb
+19: SBB Ev,Gv
+1a: SBB Gb,Eb
+1b: SBB Gv,Ev
+1c: SBB AL,Ib
+1d: SBB rAX,Iz
+1e: PUSH DS (i64)
+1f: POP DS (i64)
+# 0x20 - 0x2f
+20: AND Eb,Gb
+21: AND Ev,Gv
+22: AND Gb,Eb
+23: AND Gv,Ev
+24: AND AL,Ib
+25: AND rAX,Iz
+26: SEG=ES (Prefix)
+27: DAA (i64)
+28: SUB Eb,Gb
+29: SUB Ev,Gv
+2a: SUB Gb,Eb
+2b: SUB Gv,Ev
+2c: SUB AL,Ib
+2d: SUB rAX,Iz
+2e: SEG=CS (Prefix)
+2f: DAS (i64)
+# 0x30 - 0x3f
+30: XOR Eb,Gb
+31: XOR Ev,Gv
+32: XOR Gb,Eb
+33: XOR Gv,Ev
+34: XOR AL,Ib
+35: XOR rAX,Iz
+36: SEG=SS (Prefix)
+37: AAA (i64)
+38: CMP Eb,Gb
+39: CMP Ev,Gv
+3a: CMP Gb,Eb
+3b: CMP Gv,Ev
+3c: CMP AL,Ib
+3d: CMP rAX,Iz
+3e: SEG=DS (Prefix)
+3f: AAS (i64)
+# 0x40 - 0x4f
+40: INC eAX (i64) | REX (o64)
+41: INC eCX (i64) | REX.B (o64)
+42: INC eDX (i64) | REX.X (o64)
+43: INC eBX (i64) | REX.XB (o64)
+44: INC eSP (i64) | REX.R (o64)
+45: INC eBP (i64) | REX.RB (o64)
+46: INC eSI (i64) | REX.RX (o64)
+47: INC eDI (i64) | REX.RXB (o64)
+48: DEC eAX (i64) | REX.W (o64)
+49: DEC eCX (i64) | REX.WB (o64)
+4a: DEC eDX (i64) | REX.WX (o64)
+4b: DEC eBX (i64) | REX.WXB (o64)
+4c: DEC eSP (i64) | REX.WR (o64)
+4d: DEC eBP (i64) | REX.WRB (o64)
+4e: DEC eSI (i64) | REX.WRX (o64)
+4f: DEC eDI (i64) | REX.WRXB (o64)
+# 0x50 - 0x5f
+50: PUSH rAX/r8 (d64)
+51: PUSH rCX/r9 (d64)
+52: PUSH rDX/r10 (d64)
+53: PUSH rBX/r11 (d64)
+54: PUSH rSP/r12 (d64)
+55: PUSH rBP/r13 (d64)
+56: PUSH rSI/r14 (d64)
+57: PUSH rDI/r15 (d64)
+58: POP rAX/r8 (d64)
+59: POP rCX/r9 (d64)
+5a: POP rDX/r10 (d64)
+5b: POP rBX/r11 (d64)
+5c: POP rSP/r12 (d64)
+5d: POP rBP/r13 (d64)
+5e: POP rSI/r14 (d64)
+5f: POP rDI/r15 (d64)
+# 0x60 - 0x6f
+60: PUSHA/PUSHAD (i64)
+61: POPA/POPAD (i64)
+62: BOUND Gv,Ma (i64) | EVEX (Prefix)
+63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
+64: SEG=FS (Prefix)
+65: SEG=GS (Prefix)
+66: Operand-Size (Prefix)
+67: Address-Size (Prefix)
+68: PUSH Iz (d64)
+69: IMUL Gv,Ev,Iz
+6a: PUSH Ib (d64)
+6b: IMUL Gv,Ev,Ib
+6c: INS/INSB Yb,DX
+6d: INS/INSW/INSD Yz,DX
+6e: OUTS/OUTSB DX,Xb
+6f: OUTS/OUTSW/OUTSD DX,Xz
+# 0x70 - 0x7f
+70: JO Jb
+71: JNO Jb
+72: JB/JNAE/JC Jb
+73: JNB/JAE/JNC Jb
+74: JZ/JE Jb
+75: JNZ/JNE Jb
+76: JBE/JNA Jb
+77: JNBE/JA Jb
+78: JS Jb
+79: JNS Jb
+7a: JP/JPE Jb
+7b: JNP/JPO Jb
+7c: JL/JNGE Jb
+7d: JNL/JGE Jb
+7e: JLE/JNG Jb
+7f: JNLE/JG Jb
+# 0x80 - 0x8f
+80: Grp1 Eb,Ib (1A)
+81: Grp1 Ev,Iz (1A)
+82: Grp1 Eb,Ib (1A),(i64)
+83: Grp1 Ev,Ib (1A)
+84: TEST Eb,Gb
+85: TEST Ev,Gv
+86: XCHG Eb,Gb
+87: XCHG Ev,Gv
+88: MOV Eb,Gb
+89: MOV Ev,Gv
+8a: MOV Gb,Eb
+8b: MOV Gv,Ev
+8c: MOV Ev,Sw
+8d: LEA Gv,M
+8e: MOV Sw,Ew
+8f: Grp1A (1A) | POP Ev (d64)
+# 0x90 - 0x9f
+90: NOP | PAUSE (F3) | XCHG r8,rAX
+91: XCHG rCX/r9,rAX
+92: XCHG rDX/r10,rAX
+93: XCHG rBX/r11,rAX
+94: XCHG rSP/r12,rAX
+95: XCHG rBP/r13,rAX
+96: XCHG rSI/r14,rAX
+97: XCHG rDI/r15,rAX
+98: CBW/CWDE/CDQE
+99: CWD/CDQ/CQO
+9a: CALLF Ap (i64)
+9b: FWAIT/WAIT
+9c: PUSHF/D/Q Fv (d64)
+9d: POPF/D/Q Fv (d64)
+9e: SAHF
+9f: LAHF
+# 0xa0 - 0xaf
+a0: MOV AL,Ob
+a1: MOV rAX,Ov
+a2: MOV Ob,AL
+a3: MOV Ov,rAX
+a4: MOVS/B Yb,Xb
+a5: MOVS/W/D/Q Yv,Xv
+a6: CMPS/B Xb,Yb
+a7: CMPS/W/D Xv,Yv
+a8: TEST AL,Ib
+a9: TEST rAX,Iz
+aa: STOS/B Yb,AL
+ab: STOS/W/D/Q Yv,rAX
+ac: LODS/B AL,Xb
+ad: LODS/W/D/Q rAX,Xv
+ae: SCAS/B AL,Yb
+# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+# next instruction but Yv is correct
+af: SCAS/W/D/Q rAX,Yv
+# 0xb0 - 0xbf
+b0: MOV AL/R8L,Ib
+b1: MOV CL/R9L,Ib
+b2: MOV DL/R10L,Ib
+b3: MOV BL/R11L,Ib
+b4: MOV AH/R12L,Ib
+b5: MOV CH/R13L,Ib
+b6: MOV DH/R14L,Ib
+b7: MOV BH/R15L,Ib
+b8: MOV rAX/r8,Iv
+b9: MOV rCX/r9,Iv
+ba: MOV rDX/r10,Iv
+bb: MOV rBX/r11,Iv
+bc: MOV rSP/r12,Iv
+bd: MOV rBP/r13,Iv
+be: MOV rSI/r14,Iv
+bf: MOV rDI/r15,Iv
+# 0xc0 - 0xcf
+c0: Grp2 Eb,Ib (1A)
+c1: Grp2 Ev,Ib (1A)
+c2: RETN Iw (f64)
+c3: RETN
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+c6: Grp11A Eb,Ib (1A)
+c7: Grp11B Ev,Iz (1A)
+c8: ENTER Iw,Ib
+c9: LEAVE (d64)
+ca: RETF Iw
+cb: RETF
+cc: INT3
+cd: INT Ib
+ce: INTO (i64)
+cf: IRET/D/Q
+# 0xd0 - 0xdf
+d0: Grp2 Eb,1 (1A)
+d1: Grp2 Ev,1 (1A)
+d2: Grp2 Eb,CL (1A)
+d3: Grp2 Ev,CL (1A)
+d4: AAM Ib (i64)
+d5: AAD Ib (i64)
+d6:
+d7: XLAT/XLATB
+d8: ESC
+d9: ESC
+da: ESC
+db: ESC
+dc: ESC
+dd: ESC
+de: ESC
+df: ESC
+# 0xe0 - 0xef
+# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
+# in 64-bit mode. AMD CPUs accept the 0x66 prefix; it causes RIP truncation
+# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
+e0: LOOPNE/LOOPNZ Jb (f64)
+e1: LOOPE/LOOPZ Jb (f64)
+e2: LOOP Jb (f64)
+e3: JrCXZ Jb (f64)
+e4: IN AL,Ib
+e5: IN eAX,Ib
+e6: OUT Ib,AL
+e7: OUT Ib,eAX
+# With the 0x66 prefix in 64-bit mode, the immediate offset in "near"
+# jumps and calls is 16-bit on AMD CPUs. For CALL, the pushed return
+# address is 16 bits wide and RSP is decremented by 2, but RSP, unlike
+# RIP, is not truncated to 16 bits.
+e8: CALL Jz (f64)
+e9: JMP-near Jz (f64)
+ea: JMP-far Ap (i64)
+eb: JMP-short Jb (f64)
+ec: IN AL,DX
+ed: IN eAX,DX
+ee: OUT DX,AL
+ef: OUT DX,eAX
+# 0xf0 - 0xff
+f0: LOCK (Prefix)
+f1:
+f2: REPNE (Prefix) | XACQUIRE (Prefix)
+f3: REP/REPE (Prefix) | XRELEASE (Prefix)
+f4: HLT
+f5: CMC
+f6: Grp3_1 Eb (1A)
+f7: Grp3_2 Ev (1A)
+f8: CLC
+f9: STC
+fa: CLI
+fb: STI
+fc: CLD
+fd: STD
+fe: Grp4 (1A)
+ff: Grp5 (1A)
+EndTable
+
+Table: 2-byte opcode (0x0f)
+Referrer: 2-byte escape
+AVXcode: 1
+# 0x0f 0x00-0x0f
+00: Grp6 (1A)
+01: Grp7 (1A)
+02: LAR Gv,Ew
+03: LSL Gv,Ew
+04:
+05: SYSCALL (o64)
+06: CLTS
+07: SYSRET (o64)
+08: INVD
+09: WBINVD
+0a:
+0b: UD2 (1B)
+0c:
+# AMD's prefetch group. Intel supports prefetchw(/1) only.
+0d: GrpP
+0e: FEMMS
+# 3DNow! uses the last imm byte as opcode extension.
+0f: 3DNow! Pq,Qq,Ib
+# 0x0f 0x10-0x1f
+# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no
+# operands, but they actually do. Also, vmovss and vmovsd only accept 128-bit
+# operands. MOVSS/MOVSD have three forms in the SDM; this map shows just a
+# typical form. Many AVX instructions lack the v1 superscript, according to
+# the Intel AVX Programming Reference, A.1.
+10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
+11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
+12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
+13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
+14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
+15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
+16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
+17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
+18: Grp16 (1A)
+19:
+# Intel SDM opcode map does not list MPX instructions. For now using Gv for
+# bnd registers and Ev for everything else is OK because the instruction
+# decoder does not use the information except as an indication that there is
+# a ModR/M byte.
+1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
+1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
+1c:
+1d:
+1e:
+1f: NOP Ev
+# 0x0f 0x20-0x2f
+20: MOV Rd,Cd
+21: MOV Rd,Dd
+22: MOV Cd,Rd
+23: MOV Dd,Rd
+24:
+25:
+26:
+27:
+28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
+29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
+2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
+2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
+2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
+2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
+2e: vucomiss Vss,Wss (v1) | vucomisd  Vsd,Wsd (66),(v1)
+2f: vcomiss Vss,Wss (v1) | vcomisd  Vsd,Wsd (66),(v1)
+# 0x0f 0x30-0x3f
+30: WRMSR
+31: RDTSC
+32: RDMSR
+33: RDPMC
+34: SYSENTER
+35: SYSEXIT
+36:
+37: GETSEC
+38: escape # 3-byte escape 1
+39:
+3a: escape # 3-byte escape 2
+3b:
+3c:
+3d:
+3e:
+3f:
+# 0x0f 0x40-0x4f
+40: CMOVO Gv,Ev
+41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
+42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
+43: CMOVAE/NB/NC Gv,Ev
+44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
+45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
+46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
+47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
+48: CMOVS Gv,Ev
+49: CMOVNS Gv,Ev
+4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
+4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
+4c: CMOVL/NGE Gv,Ev
+4d: CMOVNL/GE Gv,Ev
+4e: CMOVLE/NG Gv,Ev
+4f: CMOVNLE/G Gv,Ev
+# 0x0f 0x50-0x5f
+50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
+51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
+52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
+53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
+54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
+55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
+56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
+57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
+58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
+5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
+5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
+5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
+5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
+5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
+# 0x0f 0x60-0x6f
+60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
+61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
+62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
+63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
+64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
+65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
+66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
+67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
+68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
+69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
+6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
+6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
+6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
+6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
+6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
+6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
+# 0x0f 0x70-0x7f
+70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
+71: Grp12 (1A)
+72: Grp13 (1A)
+73: Grp14 (1A)
+74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
+75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
+76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
+# Note: Remove (v), because vzeroall and vzeroupper become emms without VEX.
+77: emms | vzeroupper | vzeroall
+78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
+79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
+7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
+7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
+7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
+7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
+7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
+7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
+# 0x0f 0x80-0x8f
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+80: JO Jz (f64)
+81: JNO Jz (f64)
+82: JB/JC/JNAE Jz (f64)
+83: JAE/JNB/JNC Jz (f64)
+84: JE/JZ Jz (f64)
+85: JNE/JNZ Jz (f64)
+86: JBE/JNA Jz (f64)
+87: JA/JNBE Jz (f64)
+88: JS Jz (f64)
+89: JNS Jz (f64)
+8a: JP/JPE Jz (f64)
+8b: JNP/JPO Jz (f64)
+8c: JL/JNGE Jz (f64)
+8d: JNL/JGE Jz (f64)
+8e: JLE/JNG Jz (f64)
+8f: JNLE/JG Jz (f64)
+# 0x0f 0x90-0x9f
+90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
+91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
+92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
+93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
+94: SETE/Z Eb
+95: SETNE/NZ Eb
+96: SETBE/NA Eb
+97: SETA/NBE Eb
+98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
+99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
+9a: SETP/PE Eb
+9b: SETNP/PO Eb
+9c: SETL/NGE Eb
+9d: SETNL/GE Eb
+9e: SETLE/NG Eb
+9f: SETNLE/G Eb
+# 0x0f 0xa0-0xaf
+a0: PUSH FS (d64)
+a1: POP FS (d64)
+a2: CPUID
+a3: BT Ev,Gv
+a4: SHLD Ev,Gv,Ib
+a5: SHLD Ev,Gv,CL
+a6: GrpPDLK
+a7: GrpRNG
+a8: PUSH GS (d64)
+a9: POP GS (d64)
+aa: RSM
+ab: BTS Ev,Gv
+ac: SHRD Ev,Gv,Ib
+ad: SHRD Ev,Gv,CL
+ae: Grp15 (1A),(1C)
+af: IMUL Gv,Ev
+# 0x0f 0xb0-0xbf
+b0: CMPXCHG Eb,Gb
+b1: CMPXCHG Ev,Gv
+b2: LSS Gv,Mp
+b3: BTR Ev,Gv
+b4: LFS Gv,Mp
+b5: LGS Gv,Mp
+b6: MOVZX Gv,Eb
+b7: MOVZX Gv,Ew
+b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
+b9: Grp10 (1A)
+ba: Grp8 Ev,Ib (1A)
+bb: BTC Ev,Gv
+bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
+be: MOVSX Gv,Eb
+bf: MOVSX Gv,Ew
+# 0x0f 0xc0-0xcf
+c0: XADD Eb,Gb
+c1: XADD Ev,Gv
+c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
+c3: movnti My,Gy
+c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
+c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
+c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
+c7: Grp9 (1A)
+c8: BSWAP RAX/EAX/R8/R8D
+c9: BSWAP RCX/ECX/R9/R9D
+ca: BSWAP RDX/EDX/R10/R10D
+cb: BSWAP RBX/EBX/R11/R11D
+cc: BSWAP RSP/ESP/R12/R12D
+cd: BSWAP RBP/EBP/R13/R13D
+ce: BSWAP RSI/ESI/R14/R14D
+cf: BSWAP RDI/EDI/R15/R15D
+# 0x0f 0xd0-0xdf
+d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
+d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
+d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
+d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
+d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
+d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
+d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
+d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
+d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
+d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
+da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
+db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
+dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
+dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
+de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
+df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0xe0-0xef
+e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
+e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
+e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
+e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
+e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
+e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
+e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
+e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
+e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
+e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
+ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
+eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
+ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
+ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
+ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
+ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0xf0-0xff
+f0: vlddqu Vx,Mx (F2)
+f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
+f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
+f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
+f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
+f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
+f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
+f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
+f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
+f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
+fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
+fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
+fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
+fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
+fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
+ff:
+EndTable
+
+Table: 3-byte opcode 1 (0x0f 0x38)
+Referrer: 3-byte escape 1
+AVXcode: 2
+# 0x0f 0x38 0x00-0x0f
+00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
+01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
+02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
+03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
+04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
+05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
+06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
+07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
+08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
+09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
+0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
+0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
+0c: vpermilps Vx,Hx,Wx (66),(v)
+0d: vpermilpd Vx,Hx,Wx (66),(v)
+0e: vtestps Vx,Wx (66),(v)
+0f: vtestpd Vx,Wx (66),(v)
+# 0x0f 0x38 0x10-0x1f
+10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
+11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
+12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
+13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
+14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
+15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
+16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
+17: vptest Vx,Wx (66)
+18: vbroadcastss Vx,Wd (66),(v)
+19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
+1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
+1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
+1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
+1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
+1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
+1f: vpabsq Vx,Wx (66),(ev)
+# 0x0f 0x38 0x20-0x2f
+20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
+21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
+22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
+23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
+24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
+25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
+26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
+27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
+28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
+29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
+2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
+2b: vpackusdw Vx,Hx,Wx (66),(v1)
+2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
+2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
+2e: vmaskmovps Mx,Hx,Vx (66),(v)
+2f: vmaskmovpd Mx,Hx,Vx (66),(v)
+# 0x0f 0x38 0x30-0x3f
+30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
+31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
+32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
+33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
+34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
+35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
+36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
+37: vpcmpgtq Vx,Hx,Wx (66),(v1)
+38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
+39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
+3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
+3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
+3c: vpmaxsb Vx,Hx,Wx (66),(v1)
+3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
+3e: vpmaxuw Vx,Hx,Wx (66),(v1)
+3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0x38 0x40-0x8f
+40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
+41: vphminposuw Vdq,Wdq (66),(v1)
+42: vgetexpps/d Vx,Wx (66),(ev)
+43: vgetexpss/d Vx,Hx,Wx (66),(ev)
+44: vplzcntd/q Vx,Wx (66),(ev)
+45: vpsrlvd/q Vx,Hx,Wx (66),(v)
+46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
+47: vpsllvd/q Vx,Hx,Wx (66),(v)
+# Skip 0x48-0x4b
+4c: vrcp14ps/d Vpd,Wpd (66),(ev)
+4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+# Skip 0x50-0x57
+58: vpbroadcastd Vx,Wx (66),(v)
+59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
+5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
+5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
+# Skip 0x5c-0x63
+64: vpblendmd/q Vx,Hx,Wx (66),(ev)
+65: vblendmps/d Vx,Hx,Wx (66),(ev)
+66: vpblendmb/w Vx,Hx,Wx (66),(ev)
+# Skip 0x67-0x74
+75: vpermi2b/w Vx,Hx,Wx (66),(ev)
+76: vpermi2d/q Vx,Hx,Wx (66),(ev)
+77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
+78: vpbroadcastb Vx,Wx (66),(v)
+79: vpbroadcastw Vx,Wx (66),(v)
+7a: vpbroadcastb Vx,Rv (66),(ev)
+7b: vpbroadcastw Vx,Rv (66),(ev)
+7c: vpbroadcastd/q Vx,Rv (66),(ev)
+7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
+7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
+7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
+80: INVEPT Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
+82: INVPCID Gy,Mdq (66)
+83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
+88: vexpandps/d Vpd,Wpd (66),(ev)
+89: vpexpandd/q Vx,Wx (66),(ev)
+8a: vcompressps/d Wx,Vx (66),(ev)
+8b: vpcompressd/q Wx,Vx (66),(ev)
+8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+8d: vpermb/w Vx,Hx,Wx (66),(ev)
+8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
+# 0x0f 0x38 0x90-0xbf (FMA)
+90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
+91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
+92: vgatherdps/d Vx,Hx,Wx (66),(v)
+93: vgatherqps/d Vx,Hx,Wx (66),(v)
+94:
+95:
+96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
+97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
+98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
+99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
+9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
+9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+a0: vpscatterdd/q Wx,Vx (66),(ev)
+a1: vpscatterqd/q Wx,Vx (66),(ev)
+a2: vscatterdps/d Wx,Vx (66),(ev)
+a3: vscatterqps/d Wx,Vx (66),(ev)
+a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
+a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
+a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
+a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
+ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
+af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
+b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
+b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
+b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
+b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
+b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
+bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
+bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
+bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+# 0x0f 0x38 0xc0-0xff
+c4: vpconflictd/q Vx,Wx (66),(ev)
+c6: Grp18 (1A)
+c7: Grp19 (1A)
+c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
+c9: sha1msg1 Vdq,Wdq
+ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
+cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
+cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
+cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
+db: VAESIMC Vdq,Wdq (66),(v1)
+dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
+dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
+de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
+df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
+f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
+f2: ANDN Gy,By,Ey (v)
+f3: Grp17 (1A)
+f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
+f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+EndTable
+
+Table: 3-byte opcode 2 (0x0f 0x3a)
+Referrer: 3-byte escape 2
+AVXcode: 3
+# 0x0f 0x3a 0x00-0xff
+00: vpermq Vqq,Wqq,Ib (66),(v)
+01: vpermpd Vqq,Wqq,Ib (66),(v)
+02: vpblendd Vx,Hx,Wx,Ib (66),(v)
+03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
+04: vpermilps Vx,Wx,Ib (66),(v)
+05: vpermilpd Vx,Wx,Ib (66),(v)
+06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
+07:
+08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
+09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
+0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
+0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
+0c: vblendps Vx,Hx,Wx,Ib (66)
+0d: vblendpd Vx,Hx,Wx,Ib (66)
+0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
+0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
+14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
+15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
+16: vpextrd/q Ey,Vdq,Ib (66),(v1)
+17: vextractps Ed,Vdq,Ib (66),(v1)
+18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
+1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
+20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
+21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
+22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
+23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
+26: vgetmantps/d Vx,Wx,Ib (66),(ev)
+27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
+30: kshiftrb/w Vk,Uk,Ib (66),(v)
+31: kshiftrd/q Vk,Uk,Ib (66),(v)
+32: kshiftlb/w Vk,Uk,Ib (66),(v)
+33: kshiftld/q Vk,Uk,Ib (66),(v)
+38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
+3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
+40: vdpps Vx,Hx,Wx,Ib (66)
+41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
+42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
+43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
+4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
+4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
+4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
+51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
+54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
+55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
+56: vreduceps/d Vx,Wx,Ib (66),(ev)
+57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
+60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
+61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
+62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
+63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+66: vfpclassps/d Vk,Wx,Ib (66),(ev)
+67: vfpclassss/d Vk,Wx,Ib (66),(ev)
+cc: sha1rnds4 Vdq,Wdq,Ib
+df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
+f0: RORX Gy,Ey,Ib (F2),(v)
+EndTable
+
+GrpTable: Grp1
+0: ADD
+1: OR
+2: ADC
+3: SBB
+4: AND
+5: SUB
+6: XOR
+7: CMP
+EndTable
+
+GrpTable: Grp1A
+0: POP
+EndTable
+
+GrpTable: Grp2
+0: ROL
+1: ROR
+2: RCL
+3: RCR
+4: SHL/SAL
+5: SHR
+6:
+7: SAR
+EndTable
+
+GrpTable: Grp3_1
+0: TEST Eb,Ib
+1:
+2: NOT Eb
+3: NEG Eb
+4: MUL AL,Eb
+5: IMUL AL,Eb
+6: DIV AL,Eb
+7: IDIV AL,Eb
+EndTable
+
+GrpTable: Grp3_2
+0: TEST Ev,Iz
+1:
+2: NOT Ev
+3: NEG Ev
+4: MUL rAX,Ev
+5: IMUL rAX,Ev
+6: DIV rAX,Ev
+7: IDIV rAX,Ev
+EndTable
+
+GrpTable: Grp4
+0: INC Eb
+1: DEC Eb
+EndTable
+
+GrpTable: Grp5
+0: INC Ev
+1: DEC Ev
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+2: CALLN Ev (f64)
+3: CALLF Ep
+4: JMPN Ev (f64)
+5: JMPF Mp
+6: PUSH Ev (d64)
+7:
+EndTable
+
+GrpTable: Grp6
+0: SLDT Rv/Mw
+1: STR Rv/Mw
+2: LLDT Ew
+3: LTR Ew
+4: VERR Ew
+5: VERW Ew
+EndTable
+
+GrpTable: Grp7
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+3: LIDT Ms
+4: SMSW Mw/Rv
+5: rdpkru (110),(11B) | wrpkru (111),(11B)
+6: LMSW Ew
+7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
+EndTable
+
+GrpTable: Grp8
+4: BT
+5: BTS
+6: BTR
+7: BTC
+EndTable
+
+GrpTable: Grp9
+1: CMPXCHG8B/16B Mq/Mdq
+3: xrstors
+4: xsavec
+5: xsaves
+6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
+EndTable
+
+GrpTable: Grp10
+EndTable
+
+# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
+GrpTable: Grp11A
+0: MOV Eb,Ib
+7: XABORT Ib (000),(11B)
+EndTable
+
+GrpTable: Grp11B
+0: MOV Eb,Iz
+7: XBEGIN Jz (000),(11B)
+EndTable
+
+GrpTable: Grp12
+2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
+4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
+6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp13
+0: vprord/q Hx,Wx,Ib (66),(ev)
+1: vprold/q Hx,Wx,Ib (66),(ev)
+2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
+4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
+6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp14
+2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
+3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
+6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
+7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp15
+0: fxsave | RDFSBASE Ry (F3),(11B)
+1: fxrstor | RDGSBASE Ry (F3),(11B)
+2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
+3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
+4: XSAVE | ptwrite Ey (F3),(11B)
+5: XRSTOR | lfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B)
+7: clflush | clflushopt (66) | sfence (11B)
+EndTable
+
+GrpTable: Grp16
+0: prefetch NTA
+1: prefetch T0
+2: prefetch T1
+3: prefetch T2
+EndTable
+
+GrpTable: Grp17
+1: BLSR By,Ey (v)
+2: BLSMSK By,Ey (v)
+3: BLSI By,Ey (v)
+EndTable
+
+GrpTable: Grp18
+1: vgatherpf0dps/d Wx (66),(ev)
+2: vgatherpf1dps/d Wx (66),(ev)
+5: vscatterpf0dps/d Wx (66),(ev)
+6: vscatterpf1dps/d Wx (66),(ev)
+EndTable
+
+GrpTable: Grp19
+1: vgatherpf0qps/d Wx (66),(ev)
+2: vgatherpf1qps/d Wx (66),(ev)
+5: vscatterpf0qps/d Wx (66),(ev)
+6: vscatterpf1qps/d Wx (66),(ev)
+EndTable
+
+# AMD's Prefetch Group
+GrpTable: GrpP
+0: PREFETCH
+1: PREFETCHW
+EndTable
+
+GrpTable: GrpPDLK
+0: MONTMUL
+1: XSHA1
+2: XSHA2
+EndTable
+
+GrpTable: GrpRNG
+0: xstore-rng
+1: xcrypt-ecb
+2: xcrypt-cbc
+4: xcrypt-cfb
+5: xcrypt-ofb
+EndTable
diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
new file mode 100644 (file)
index 0000000..b02a36b
--- /dev/null
@@ -0,0 +1,393 @@
+#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
+# gen-insn-attr-x86.awk: Instruction attribute table generator
+# Written by Masami Hiramatsu <mhiramat@redhat.com>
+#
+# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
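+#
+# For reference, the generated inat-tables.c consists of attribute arrays of
+# roughly the following shape (illustrative sketch only; the exact entries
+# depend on the opcode map that is fed in):
+#
+#	/* Table: one byte opcode */
+#	const insn_attr_t inat_primary_table[INAT_OPCODE_TABLE_SIZE] = {
+#		[0x0f] = INAT_MAKE_ESCAPE(1),
+#		[0x70] = INAT_MAKE_IMM(INAT_IMM_BYTE),
+#		...
+#	};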
+
+# Awk implementation sanity check
+function check_awk_implement() {
+       if (sprintf("%x", 0) != "0")
+               return "Your awk has a printf-format problem."
+       return ""
+}
+
+# Clear working vars
+function clear_vars() {
+       delete table
+       delete lptable2
+       delete lptable1
+       delete lptable3
+       eid = -1 # escape id
+       gid = -1 # group id
+       aid = -1 # AVX id
+       tname = ""
+}
+
+BEGIN {
+       # Implementation error checking
+       awkchecked = check_awk_implement()
+       if (awkchecked != "") {
+               print "Error: " awkchecked > "/dev/stderr"
+               print "Please try to use gawk." > "/dev/stderr"
+               exit 1
+       }
+
+       # Setup generating tables
+       print "/* x86 opcode map generated from x86-opcode-map.txt */"
+       print "/* Do not change this code. */\n"
+       ggid = 1
+       geid = 1
+       gaid = 0
+       delete etable
+       delete gtable
+       delete atable
+
+       opnd_expr = "^[A-Za-z/]"
+       ext_expr = "^\\("
+       sep_expr = "^\\|$"
+       group_expr = "^Grp[0-9A-Za-z]+"
+
+       imm_expr = "^[IJAOL][a-z]"
+       imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+       imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+       imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
+       imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
+       imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
+       imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
+       imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+       imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+       imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
+       imm_flag["Ob"] = "INAT_MOFFSET"
+       imm_flag["Ov"] = "INAT_MOFFSET"
+       imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+
+       modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
+       force64_expr = "\\([df]64\\)"
+       rex_expr = "^REX(\\.[XRWB]+)*"
+       fpu_expr = "^ESC" # TODO
+
+       lprefix1_expr = "\\((66|!F3)\\)"
+       lprefix2_expr = "\\(F3\\)"
+       lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+       lprefix_expr = "\\((66|F2|F3)\\)"
+       max_lprefix = 4
+
+       # All opcodes starting with lower-case 'v', 'k' or with (v1) superscript
+       # accept the VEX prefix
+       vexok_opcode_expr = "^[vk].*"
+       vexok_expr = "\\(v1\\)"
+       # All opcodes with (v) superscript support *only* the VEX prefix
+       vexonly_expr = "\\(v\\)"
+       # All opcodes with (ev) superscript support *only* the EVEX prefix
+       evexonly_expr = "\\(ev\\)"
+
+       prefix_expr = "\\(Prefix\\)"
+       prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
+       prefix_num["REPNE"] = "INAT_PFX_REPNE"
+       prefix_num["REP/REPE"] = "INAT_PFX_REPE"
+       prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
+       prefix_num["XRELEASE"] = "INAT_PFX_REPE"
+       prefix_num["LOCK"] = "INAT_PFX_LOCK"
+       prefix_num["SEG=CS"] = "INAT_PFX_CS"
+       prefix_num["SEG=DS"] = "INAT_PFX_DS"
+       prefix_num["SEG=ES"] = "INAT_PFX_ES"
+       prefix_num["SEG=FS"] = "INAT_PFX_FS"
+       prefix_num["SEG=GS"] = "INAT_PFX_GS"
+       prefix_num["SEG=SS"] = "INAT_PFX_SS"
+       prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
+       prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
+       prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
+       prefix_num["EVEX"] = "INAT_PFX_EVEX"
+
+       clear_vars()
+}
+
+function semantic_error(msg) {
+       print "Semantic error at " NR ": " msg > "/dev/stderr"
+       exit 1
+}
+
+function debug(msg) {
+       print "DEBUG: " msg
+}
+
+function array_size(arr,   i,c) {
+       c = 0
+       for (i in arr)
+               c++
+       return c
+}
+
+/^Table:/ {
+       print "/* " $0 " */"
+       if (tname != "")
+               semantic_error("Hit Table: before EndTable:.");
+}
+
+/^Referrer:/ {
+       if (NF != 1) {
+               # escape opcode table
+               ref = ""
+               for (i = 2; i <= NF; i++)
+                       ref = ref $i
+               eid = escape[ref]
+               tname = sprintf("inat_escape_table_%d", eid)
+       }
+}
+
+/^AVXcode:/ {
+       if (NF != 1) {
+               # AVX/escape opcode table
+               aid = $2
+               if (gaid <= aid)
+                       gaid = aid + 1
+               if (tname == "")        # AVX only opcode table
+                       tname = sprintf("inat_avx_table_%d", $2)
+       }
+       if (aid == -1 && eid == -1)     # primary opcode table
+               tname = "inat_primary_table"
+}
+
+/^GrpTable:/ {
+       print "/* " $0 " */"
+       if (!($2 in group))
+               semantic_error("No group: " $2 )
+       gid = group[$2]
+       tname = "inat_group_table_" gid
+}
+
+function print_table(tbl,name,fmt,n)
+{
+       print "const insn_attr_t " name " = {"
+       for (i = 0; i < n; i++) {
+               id = sprintf(fmt, i)
+               if (tbl[id])
+                       print " [" id "] = " tbl[id] ","
+       }
+       print "};"
+}
+
+/^EndTable/ {
+       if (gid != -1) {
+               # print group tables
+               if (array_size(table) != 0) {
+                       print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
+                                   "0x%x", 8)
+                       gtable[gid,0] = tname
+               }
+               if (array_size(lptable1) != 0) {
+                       print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
+                                   "0x%x", 8)
+                       gtable[gid,1] = tname "_1"
+               }
+               if (array_size(lptable2) != 0) {
+                       print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
+                                   "0x%x", 8)
+                       gtable[gid,2] = tname "_2"
+               }
+               if (array_size(lptable3) != 0) {
+                       print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
+                                   "0x%x", 8)
+                       gtable[gid,3] = tname "_3"
+               }
+       } else {
+               # print primary/escaped tables
+               if (array_size(table) != 0) {
+                       print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
+                                   "0x%02x", 256)
+                       etable[eid,0] = tname
+                       if (aid >= 0)
+                               atable[aid,0] = tname
+               }
+               if (array_size(lptable1) != 0) {
+                       print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
+                                   "0x%02x", 256)
+                       etable[eid,1] = tname "_1"
+                       if (aid >= 0)
+                               atable[aid,1] = tname "_1"
+               }
+               if (array_size(lptable2) != 0) {
+                       print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
+                                   "0x%02x", 256)
+                       etable[eid,2] = tname "_2"
+                       if (aid >= 0)
+                               atable[aid,2] = tname "_2"
+               }
+               if (array_size(lptable3) != 0) {
+                       print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
+                                   "0x%02x", 256)
+                       etable[eid,3] = tname "_3"
+                       if (aid >= 0)
+                               atable[aid,3] = tname "_3"
+               }
+       }
+       print ""
+       clear_vars()
+}
+
+function add_flags(old,new) {
+       if (old && new)
+               return old " | " new
+       else if (old)
+               return old
+       else
+               return new
+}
+
+# convert operands to flags.
+function convert_operands(count,opnd,       i,j,imm,mod)
+{
+       imm = null
+       mod = null
+       for (j = 1; j <= count; j++) {
+               i = opnd[j]
+               if (match(i, imm_expr) == 1) {
+                       if (!imm_flag[i])
+                               semantic_error("Unknown imm opnd: " i)
+                       if (imm) {
+                               if (i != "Ib")
+                                       semantic_error("Second IMM error")
+                               imm = add_flags(imm, "INAT_SCNDIMM")
+                       } else
+                               imm = imm_flag[i]
+               } else if (match(i, modrm_expr))
+                       mod = "INAT_MODRM"
+       }
+       return add_flags(imm, mod)
+}
+
+/^[0-9a-f]+\:/ {
+       if (NR == 1)
+               next
+       # get index
+       idx = "0x" substr($1, 1, index($1,":") - 1)
+       if (idx in table)
+               semantic_error("Redefine " idx " in " tname)
+
+       # check if escaped opcode
+       if ("escape" == $2) {
+               if ($3 != "#")
+                       semantic_error("No escaped name")
+               ref = ""
+               for (i = 4; i <= NF; i++)
+                       ref = ref $i
+               if (ref in escape)
+                       semantic_error("Redefine escape (" ref ")")
+               escape[ref] = geid
+               geid++
+               table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
+               next
+       }
+
+       variant = null
+       # converts
+       i = 2
+       while (i <= NF) {
+               opcode = $(i++)
+               delete opnds
+               ext = null
+               flags = null
+               opnd = null
+               # parse one opcode
+               if (match($i, opnd_expr)) {
+                       opnd = $i
+                       count = split($(i++), opnds, ",")
+                       flags = convert_operands(count, opnds)
+               }
+               if (match($i, ext_expr))
+                       ext = $(i++)
+               if (match($i, sep_expr))
+                       i++
+               else if (i < NF)
+                       semantic_error($i " is not a separator")
+
+               # check if group opcode
+               if (match(opcode, group_expr)) {
+                       if (!(opcode in group)) {
+                               group[opcode] = ggid
+                               ggid++
+                       }
+                       flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
+               }
+               # check force(or default) 64bit
+               if (match(ext, force64_expr))
+                       flags = add_flags(flags, "INAT_FORCE64")
+
+               # check REX prefix
+               if (match(opcode, rex_expr))
+                       flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
+
+               # check coprocessor escape : TODO
+               if (match(opcode, fpu_expr))
+                       flags = add_flags(flags, "INAT_MODRM")
+
+               # check VEX codes
+               if (match(ext, evexonly_expr))
+                       flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
+               else if (match(ext, vexonly_expr))
+                       flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
+               else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
+                       flags = add_flags(flags, "INAT_VEXOK")
+
+               # check prefixes
+               if (match(ext, prefix_expr)) {
+                       if (!prefix_num[opcode])
+                               semantic_error("Unknown prefix: " opcode)
+                       flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
+               }
+               if (length(flags) == 0)
+                       continue
+               # check if last prefix
+               if (match(ext, lprefix1_expr)) {
+                       lptable1[idx] = add_flags(lptable1[idx],flags)
+                       variant = "INAT_VARIANT"
+               }
+               if (match(ext, lprefix2_expr)) {
+                       lptable2[idx] = add_flags(lptable2[idx],flags)
+                       variant = "INAT_VARIANT"
+               }
+               if (match(ext, lprefix3_expr)) {
+                       lptable3[idx] = add_flags(lptable3[idx],flags)
+                       variant = "INAT_VARIANT"
+               }
+               if (!match(ext, lprefix_expr)){
+                       table[idx] = add_flags(table[idx],flags)
+               }
+       }
+       if (variant)
+               table[idx] = add_flags(table[idx],variant)
+}
+
+END {
+       if (awkchecked != "")
+               exit 1
+       # print escape opcode map's array
+       print "/* Escape opcode map array */"
+       print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
+             "[INAT_LSTPFX_MAX + 1] = {"
+       for (i = 0; i < geid; i++)
+               for (j = 0; j < max_lprefix; j++)
+                       if (etable[i,j])
+                               print " ["i"]["j"] = "etable[i,j]","
+       print "};\n"
+       # print group opcode map's array
+       print "/* Group opcode map array */"
+       print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
+             "[INAT_LSTPFX_MAX + 1] = {"
+       for (i = 0; i < ggid; i++)
+               for (j = 0; j < max_lprefix; j++)
+                       if (gtable[i,j])
+                               print " ["i"]["j"] = "gtable[i,j]","
+       print "};\n"
+       # print AVX opcode map's array
+       print "/* AVX opcode map array */"
+       print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
+             "[INAT_LSTPFX_MAX + 1] = {"
+       for (i = 0; i < gaid; i++)
+               for (j = 0; j < max_lprefix; j++)
+                       if (atable[i,j])
+                               print " ["i"]["j"] = "atable[i,j]","
+       print "};"
+}
+
index a4139e386ef37471e7c0f71e66b6df05739e2360..b0e92a6d0903b0b18b8e451194d3df73269d8162 100644 (file)
@@ -18,7 +18,7 @@
 #ifndef _ORC_H
 #define _ORC_H
 
-#include "orc_types.h"
+#include <asm/orc_types.h>
 
 struct objtool_file;
 
diff --git a/tools/objtool/orc_types.h b/tools/objtool/orc_types.h
deleted file mode 100644 (file)
index 9c9dc57..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ORC_TYPES_H
-#define _ORC_TYPES_H
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-
-/*
- * The ORC_REG_* registers are base registers which are used to find other
- * registers on the stack.
- *
- * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
- * address of the previous frame: the caller's SP before it called the current
- * function.
- *
- * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
- * the current frame.
- *
- * The most commonly used base registers are SP and BP -- which the previous SP
- * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
- * usually based on.
- *
- * The rest of the base registers are needed for special cases like entry code
- * and GCC realigned stacks.
- */
-#define ORC_REG_UNDEFINED              0
-#define ORC_REG_PREV_SP                        1
-#define ORC_REG_DX                     2
-#define ORC_REG_DI                     3
-#define ORC_REG_BP                     4
-#define ORC_REG_SP                     5
-#define ORC_REG_R10                    6
-#define ORC_REG_R13                    7
-#define ORC_REG_BP_INDIRECT            8
-#define ORC_REG_SP_INDIRECT            9
-#define ORC_REG_MAX                    15
-
-/*
- * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
- * caller's SP right before it made the call).  Used for all callable
- * functions, i.e. all C code and all callable asm functions.
- *
- * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
- * to a fully populated pt_regs from a syscall, interrupt, or exception.
- *
- * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
- * points to the iret return frame.
- *
- * The UNWIND_HINT macros are used only for the unwind_hint struct.  They
- * aren't used in struct orc_entry due to size and complexity constraints.
- * Objtool converts them to real types when it converts the hints to orc
- * entries.
- */
-#define ORC_TYPE_CALL                  0
-#define ORC_TYPE_REGS                  1
-#define ORC_TYPE_REGS_IRET             2
-#define UNWIND_HINT_TYPE_SAVE          3
-#define UNWIND_HINT_TYPE_RESTORE       4
-
-#ifndef __ASSEMBLY__
-/*
- * This struct is more or less a vastly simplified version of the DWARF Call
- * Frame Information standard.  It contains only the necessary parts of DWARF
- * CFI, simplified for ease of access by the in-kernel unwinder.  It tells the
- * unwinder how to find the previous SP and BP (and sometimes entry regs) on
- * the stack for a given code address.  Each instance of the struct corresponds
- * to one or more code locations.
- */
-struct orc_entry {
-       s16             sp_offset;
-       s16             bp_offset;
-       unsigned        sp_reg:4;
-       unsigned        bp_reg:4;
-       unsigned        type:2;
-} __packed;
-
-/*
- * This struct is used by asm and inline asm code to manually annotate the
- * location of registers on the stack for the ORC unwinder.
- *
- * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
- */
-struct unwind_hint {
-       u32             ip;
-       s16             sp_offset;
-       u8              sp_reg;
-       u8              type;
-};
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ORC_TYPES_H */
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
new file mode 100755 (executable)
index 0000000..1470e74
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+FILES='
+arch/x86/lib/insn.c
+arch/x86/lib/inat.c
+arch/x86/lib/x86-opcode-map.txt
+arch/x86/tools/gen-insn-attr-x86.awk
+arch/x86/include/asm/insn.h
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/inat_types.h
+arch/x86/include/asm/orc_types.h
+'
+
+check()
+{
+       local file=$1
+
+       diff $file ../../$file > /dev/null ||
+               echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
+}
+
+if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
+       exit 0
+fi
+
+for i in $FILES; do
+  check $i
+done
index da205d1fa03c546c4972d1e696a5eeeaab1d9711..1dd5f4fcffd53f375ba00479bfed37d867399c4a 100644 (file)
@@ -26,7 +26,7 @@ endif
 
 ifneq ($(OUTPUT),)
 # check that the output directory actually exists
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+OUTDIR := $(shell cd $(OUTPUT) && pwd)
 $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
 endif
 
index c25a74ae51baef13bfa5609d2957af76941597f6..2bb3eef7d5c1fbf36d420be2801e74b4f7eeb049 100644 (file)
@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
 
        dprintf("set %s as cpufreq governor\n", governor);
 
-       if (cpupower_is_cpu_online(cpu) != 0) {
+       if (cpupower_is_cpu_online(cpu) != 1) {
                perror("cpufreq_cpu_exists");
                fprintf(stderr, "error: cpu %u does not exist\n", cpu);
                return -1;
index 1b5da0066ebf90bfe4c441fab62fd26b4cc99267..5b3205f1621749bb6ebc340413ae16d957064cb9 100644 (file)
@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
 {
        int num;
        char *tmp;
+       int this_cpu;
+
+       this_cpu = sched_getcpu();
 
        /* Assume idle state count is the same for all CPUs */
-       cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
+       cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
 
        if (cpuidle_sysfs_monitor.hw_states_num <= 0)
                return NULL;
 
        for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
-               tmp = cpuidle_state_name(0, num);
+               tmp = cpuidle_state_name(this_cpu, num);
                if (tmp == NULL)
                        continue;
 
@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
                strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
                free(tmp);
 
-               tmp = cpuidle_state_desc(0, num);
+               tmp = cpuidle_state_desc(this_cpu, num);
                if (tmp == NULL)
                        continue;
                strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
index 654efd9768fd3687924d6c2caf75a9b5425b8225..3fab179b1abac797a55952dd69b78848f74fdc64 100644 (file)
@@ -13,7 +13,7 @@ endif
 
 # check that the output directory actually exists
 ifneq ($(OUTPUT),)
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+OUTDIR := $(shell cd $(OUTPUT) && pwd)
 $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
 endif
 
index 3c9c0bbe7dbb669786f6700c8418f35edc74b6b6..eaf599dc2137b85c18dbb09b80465528ca07c7b0 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
-TARGETS =  bpf
+TARGETS = android
+TARGETS += bpf
 TARGETS += breakpoints
 TARGETS += capabilities
 TARGETS += cpufreq
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
new file mode 100644 (file)
index 0000000..1a74922
--- /dev/null
@@ -0,0 +1,46 @@
+SUBDIRS := ion
+
+TEST_PROGS := run.sh
+
+.PHONY: all clean
+
+include ../lib.mk
+
+# SUBDIR test prog name should be in the form: SUBDIR_test.sh
+all:
+       @for DIR in $(SUBDIRS); do              \
+               BUILD_TARGET=$(OUTPUT)/$$DIR;   \
+               mkdir $$BUILD_TARGET  -p;       \
+               make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+               TEST=$$DIR"_test.sh"; \
+               if [ -e $$DIR/$$TEST ]; then \
+                       rsync -a $$DIR/$$TEST $$BUILD_TARGET/; \
+               fi; \
+       done
+
+override define RUN_TESTS
+       @cd $(OUTPUT); ./run.sh
+endef
+
+override define INSTALL_RULE
+       mkdir -p $(INSTALL_PATH)
+       install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
+
+       @for SUBDIR in $(SUBDIRS); do \
+               BUILD_TARGET=$(OUTPUT)/$$SUBDIR;        \
+               mkdir $$BUILD_TARGET  -p;       \
+               $(MAKE) OUTPUT=$$BUILD_TARGET -C $$SUBDIR INSTALL_PATH=$(INSTALL_PATH)/$$SUBDIR install; \
+       done;
+endef
+
+override define EMIT_TESTS
+       echo "./run.sh"
+endef
+
+override define CLEAN
+       @for DIR in $(SUBDIRS); do              \
+               BUILD_TARGET=$(OUTPUT)/$$DIR;   \
+               mkdir $$BUILD_TARGET  -p;       \
+               make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+       done
+endef
diff --git a/tools/testing/selftests/android/ion/.gitignore b/tools/testing/selftests/android/ion/.gitignore
new file mode 100644 (file)
index 0000000..67e6f39
--- /dev/null
@@ -0,0 +1,2 @@
+ionapp_export
+ionapp_import
diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile
new file mode 100644 (file)
index 0000000..96e0c44
--- /dev/null
@@ -0,0 +1,16 @@
+
+INCLUDEDIR := -I. -I../../../../../drivers/staging/android/uapi/
+CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
+
+TEST_GEN_FILES := ionapp_export ionapp_import
+
+all: $(TEST_GEN_FILES)
+
+$(TEST_GEN_FILES): ipcsocket.c ionutils.c
+
+TEST_PROGS := ion_test.sh
+
+include ../../lib.mk
+
+$(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
+$(OUTPUT)/ionapp_import: ionapp_import.c ipcsocket.c ionutils.c
diff --git a/tools/testing/selftests/android/ion/README b/tools/testing/selftests/android/ion/README
new file mode 100644 (file)
index 0000000..21783e9
--- /dev/null
@@ -0,0 +1,101 @@
+ION BUFFER SHARING UTILITY
+==========================
+File: ion_test.sh : Utility to test ION driver buffer sharing mechanism.
+Author: Pintu Kumar <pintu.ping@gmail.com>
+
+Introduction:
+-------------
+This is a test utility to verify ION buffer sharing in user space
+between two independent processes.
+It uses a unix domain socket (with SCM_RIGHTS) as IPC to transfer an FD to
+another process so that both can share the same buffer.
+This utility demonstrates how ION buffer sharing can be implemented between
+two user space processes, using various heap types.
+The following heap types are supported by the ION driver:
+ION_HEAP_TYPE_SYSTEM (0)
+ION_HEAP_TYPE_SYSTEM_CONTIG (1)
+ION_HEAP_TYPE_CARVEOUT (2)
+ION_HEAP_TYPE_CHUNK (3)
+ION_HEAP_TYPE_DMA (4)
+
+By default only the SYSTEM and SYSTEM_CONTIG heaps are supported.
+Each heap is associated with its respective heap id.
+This utility is designed as a client/server program.
+The server part (ionapp_export) is the exporter of the buffer.
+It is responsible for creating an ION client, allocating the buffer based on
+the heap id, writing some data to this buffer and then exporting the FD
+(associated with this buffer) to another process using socket IPC.
+This FD is called the buffer FD (which is different from the ION client FD).
+
+The client part (ionapp_import) is the importer of the buffer.
+It retrieves the FD from the socket data and installs it into its own
+address space.
+This new FD internally points to the same kernel buffer.
+It first reads the data stored in this buffer and prints it.
+Then it writes data of a different size (possibly with different contents) to
+the same buffer.
+Finally the buffer FD must be closed by both the exporter and the importer.
+Thus the same kernel buffer is shared between two user space processes using
+the ION driver, with only a single allocation.
+
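+The FD transfer itself relies on the standard SCM_RIGHTS ancillary-data
+mechanism of unix domain sockets. The rough shape of the sending side is
+sketched below (names are illustrative and error handling is omitted; the
+selftest's real helpers are opensocket(), sendtosocket() and
+receivefromsocket() in ipcsocket.c):
+
+    /* needs <sys/socket.h> and <string.h>; "sock" is a connected AF_UNIX
+     * stream socket and "fd" is the buffer FD to be passed */
+    int send_one_fd(int sock, int fd)
+    {
+            char data = 'F';    /* at least one byte of regular payload */
+            struct iovec iov = { .iov_base = &data, .iov_len = sizeof(data) };
+            union {
+                    struct cmsghdr align;
+                    char buf[CMSG_SPACE(sizeof(int))];
+            } u;
+            struct msghdr msg = {
+                    .msg_iov = &iov, .msg_iovlen = 1,
+                    .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
+            };
+            struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+
+            cmsg->cmsg_level = SOL_SOCKET;
+            cmsg->cmsg_type = SCM_RIGHTS;  /* kernel installs a dup of fd for the peer */
+            cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+            memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
+
+            return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
+    }
+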
+Prerequisite:
+-------------
+This utility works only if the /dev/ion interface is present.
+The following configs need to be enabled in the kernel to include the ion driver:
+CONFIG_ANDROID=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+
+This utility must be run as the root user.
+
+
+Compile and test:
+-----------------
+This utility is meant to be run as part of the kselftest framework in the
+kernel.
+To compile and run it using kselftest you can simply do the following from the
+kernel top directory:
+linux$ make TARGETS=android kselftest
+Or you can also use:
+linux$ make -C tools/testing/selftests TARGETS=android run_tests
+Under kselftest, the ion_test.sh script is executed directly to test buffer
+sharing using the ion system heap.
+Currently the heap size is hard coded to just 10 bytes inside this script.
+You need to be root to run it under kselftest.
+
+You can also compile and test manually using the following steps:
+ion$ make
+This will generate two executables: ionapp_export and ionapp_import.
+Now you can run the export and import manually by specifying the heap type
+and the heap size.
+You can also directly execute the shell script to run the test automatically.
+Simply use the following command to run the test.
+ion$ sudo ./ion_test.sh
+
+Test Results:
+-------------
+The utility has been verified on a 32-bit Ubuntu system with Linux kernel 4.14.
+Here is a snapshot of the test result using kselftest:
+
+linux# make TARGETS=android kselftest
+heap_type: 0, heap_size: 10
+--------------------------------------
+heap type: 0
+  heap id: 1
+heap name: ion_system_heap
+--------------------------------------
+Fill buffer content:
+0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd
+Sharing fd: 6, Client fd: 5
+<ion_close_buffer_fd>: buffer release successfully....
+Received buffer fd: 4
+Read buffer content:
+0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0x0 0x0 0x0 0x0 0x0 0x0
+0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0
+Fill buffer content:
+0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd
+0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd
+0xfd 0xfd
+<ion_close_buffer_fd>: buffer release successfully....
+ion_test.sh: heap_type: 0 - [PASS]
+
+ion_test.sh: done
diff --git a/tools/testing/selftests/android/ion/config b/tools/testing/selftests/android/ion/config
new file mode 100644 (file)
index 0000000..19db6ca
--- /dev/null
@@ -0,0 +1,4 @@
+CONFIG_ANDROID=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
diff --git a/tools/testing/selftests/android/ion/ion.h b/tools/testing/selftests/android/ion/ion.h
new file mode 100644 (file)
index 0000000..f7021ac
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This file is copied from drivers/staging/android/uapi/ion.h
+ * This local copy is required for the selftest to pass when built
+ * outside the kernel source tree.
+ * Please keep this file in sync with its original file until the
+ * ion driver is moved outside the staging tree.
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:       memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:     memory allocated from a prereserved
+ *                              carveout heap, allocations are physically
+ *                              contiguous
+ * @ION_HEAP_TYPE_DMA:          memory allocated via DMA API
+ * @ION_NUM_HEAPS:              helper for iterating over heaps, a bit mask
+ *                              is used to identify the heaps, so only 32
+ *                              total heap types are supported
+ */
+enum ion_heap_type {
+       ION_HEAP_TYPE_SYSTEM,
+       ION_HEAP_TYPE_SYSTEM_CONTIG,
+       ION_HEAP_TYPE_CARVEOUT,
+       ION_HEAP_TYPE_CHUNK,
+       ION_HEAP_TYPE_DMA,
+       ION_HEAP_TYPE_CUSTOM, /*
+                              * must be last so device specific heaps always
+                              * are at the end of this enum
+                              */
+};
+
+#define ION_NUM_HEAP_IDS               (sizeof(unsigned int) * 8)
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+
+/*
+ * mappings of this buffer should be cached, ion will do cache maintenance
+ * when the buffer is mapped for dma
+ */
+#define ION_FLAG_CACHED 1
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:               size of the allocation
+ * @heap_id_mask:      mask of heap ids to allocate from
+ * @flags:             flags passed to heap
+ * @fd:                file descriptor (dma-buf fd) populated by the kernel on
+ *                     return, used to refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+       __u64 len;
+       __u32 heap_id_mask;
+       __u32 flags;
+       __u32 fd;
+       __u32 unused;
+};
+
+#define MAX_HEAP_NAME                  32
+
+/**
+ * struct ion_heap_data - data about a heap
+ * @name - first 32 characters of the heap name
+ * @type - heap type
+ * @heap_id - heap id for the heap
+ */
+struct ion_heap_data {
+       char name[MAX_HEAP_NAME];
+       __u32 type;
+       __u32 heap_id;
+       __u32 reserved0;
+       __u32 reserved1;
+       __u32 reserved2;
+};
+
+/**
+ * struct ion_heap_query - collection of data about all heaps
+ * @cnt - total number of heaps to be copied
+ * @heaps - buffer to copy heap data
+ */
+struct ion_heap_query {
+       __u32 cnt; /* Total number of heaps to be copied */
+       __u32 reserved0; /* align to 64bits */
+       __u64 heaps; /* buffer to be populated */
+       __u32 reserved1;
+       __u32 reserved2;
+};
+
+#define ION_IOC_MAGIC          'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the fd field
+ * populated with a dma-buf fd referring to the new allocation.
+ */
+#define ION_IOC_ALLOC          _IOWR(ION_IOC_MAGIC, 0, \
+                                     struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_HEAP_QUERY - information about available heaps
+ *
+ * Takes an ion_heap_query structure and populates information about
+ * available Ion heaps.
+ */
+#define ION_IOC_HEAP_QUERY     _IOWR(ION_IOC_MAGIC, 8, \
+                                       struct ion_heap_query)
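+
+/*
+ * Illustrative allocation flow (sketch only, error handling omitted; the
+ * selftest's real version is ion_export_buffer_fd() in ionutils.c):
+ *
+ *	int ionfd = open("/dev/ion", O_RDWR);
+ *	struct ion_heap_data heaps[ION_NUM_HEAP_IDS] = {};
+ *	struct ion_heap_query query = {
+ *		.cnt = ION_NUM_HEAP_IDS,
+ *		.heaps = (__u64)(unsigned long)heaps,
+ *	};
+ *	ioctl(ionfd, ION_IOC_HEAP_QUERY, &query);	// discover heap ids
+ *
+ *	struct ion_allocation_data alloc = {
+ *		.len = 4096,
+ *		.heap_id_mask = 1 << heaps[0].heap_id,
+ *	};
+ *	ioctl(ionfd, ION_IOC_ALLOC, &alloc);	// alloc.fd is the shareable buffer fd
+ */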
+
+#endif /* _UAPI_LINUX_ION_H */
diff --git a/tools/testing/selftests/android/ion/ion_test.sh b/tools/testing/selftests/android/ion/ion_test.sh
new file mode 100755 (executable)
index 0000000..a1aff50
--- /dev/null
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+heapsize=4096
+TCID="ion_test.sh"
+errcode=0
+
+run_test()
+{
+       heaptype=$1
+       ./ionapp_export -i $heaptype -s $heapsize &
+       sleep 1
+       ./ionapp_import
+       if [ $? -ne 0 ]; then
+               echo "$TCID: heap_type: $heaptype - [FAIL]"
+               errcode=1
+       else
+               echo "$TCID: heap_type: $heaptype - [PASS]"
+       fi
+       sleep 1
+       echo ""
+}
+
+check_root()
+{
+       uid=$(id -u)
+       if [ $uid -ne 0 ]; then
+               echo $TCID: must be run as root >&2
+               exit 0
+       fi
+}
+
+check_device()
+{
+       DEVICE=/dev/ion
+       if [ ! -e $DEVICE ]; then
+               echo $TCID: No $DEVICE device found >&2
+               echo $TCID: Maybe CONFIG_ION is not set >&2
+               exit 0
+       fi
+}
+
+main_function()
+{
+       check_device
+       check_root
+
+       # ION_SYSTEM_HEAP TEST
+       run_test 0
+       # ION_SYSTEM_CONTIG_HEAP TEST
+       run_test 1
+}
+
+main_function
+echo "$TCID: done"
+exit $errcode
diff --git a/tools/testing/selftests/android/ion/ionapp_export.c b/tools/testing/selftests/android/ion/ionapp_export.c
new file mode 100644 (file)
index 0000000..a944e72
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * ionapp_export.c
+ *
+ * It is a user space utility to create and export android
+ * ion memory buffer fd to another process using unix domain socket as IPC.
+ * This acts like a server for ionapp_import(client).
+ * So, this server has to be started first before the client.
+ *
+ * Copyright (C) 2017 Pintu Kumar <pintu.ping@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/time.h>
+#include "ionutils.h"
+#include "ipcsocket.h"
+
+
+void print_usage(int argc, char *argv[])
+{
+       printf("Usage: %s [-h <help>] [-i <heap id>] [-s <size in bytes>]\n",
+               argv[0]);
+}
+
+int main(int argc, char *argv[])
+{
+       int opt, ret, status, heapid;
+       int sockfd, client_fd, shared_fd;
+       unsigned char *map_buf;
+       unsigned long map_len, heap_type, heap_size, flags;
+       struct ion_buffer_info info;
+       struct socket_info skinfo;
+
+       if (argc < 2) {
+               print_usage(argc, argv);
+               return -1;
+       }
+
+       heap_size = 0;
+       flags = 0;
+
+       while ((opt = getopt(argc, argv, "hi:s:")) != -1) {
+               switch (opt) {
+               case 'h':
+                       print_usage(argc, argv);
+                       exit(0);
+                       break;
+               case 'i':
+                       heapid = atoi(optarg);
+                       switch (heapid) {
+                       case 0:
+                               heap_type = ION_HEAP_TYPE_SYSTEM;
+                               break;
+                       case 1:
+                               heap_type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+                               break;
+                       default:
+                               printf("ERROR: heap type not supported\n");
+                               exit(1);
+                       }
+                       break;
+               case 's':
+                       heap_size = atoi(optarg);
+                       break;
+               default:
+                       print_usage(argc, argv);
+                       exit(1);
+                       break;
+               }
+       }
+
+       if (heap_size <= 0) {
+               printf("heap_size cannot be 0\n");
+               print_usage(argc, argv);
+               exit(1);
+       }
+
+       printf("heap_type: %ld, heap_size: %ld\n", heap_type, heap_size);
+       info.heap_type = heap_type;
+       info.heap_size = heap_size;
+       info.flag_type = flags;
+
+       /* This is server: open the socket connection first */
+       /* Here; 1 indicates server or exporter */
+       status = opensocket(&sockfd, SOCKET_NAME, 1);
+       if (status < 0) {
+               fprintf(stderr, "<%s>: Failed opensocket.\n", __func__);
+               goto err_socket;
+       }
+       skinfo.sockfd = sockfd;
+
+       ret = ion_export_buffer_fd(&info);
+       if (ret < 0) {
+               fprintf(stderr, "FAILED: ion_export_buffer_fd\n");
+               goto err_export;
+       }
+       client_fd = info.ionfd;
+       shared_fd = info.buffd;
+       map_buf = info.buffer;
+       map_len = info.buflen;
+       write_buffer(map_buf, map_len);
+
+       /* share ion buf fd with other user process */
+       printf("Sharing fd: %d, Client fd: %d\n", shared_fd, client_fd);
+       skinfo.datafd = shared_fd;
+       skinfo.buflen = map_len;
+
+       ret = socket_send_fd(&skinfo);
+       if (ret < 0) {
+               fprintf(stderr, "FAILED: socket_send_fd\n");
+               goto err_send;
+       }
+
+err_send:
+err_export:
+       ion_close_buffer_fd(&info);
+
+err_socket:
+       closesocket(sockfd, SOCKET_NAME);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/android/ion/ionapp_import.c b/tools/testing/selftests/android/ion/ionapp_import.c
new file mode 100644 (file)
index 0000000..ae2d704
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * ionapp_import.c
+ *
+ * It is a user space utility to receive android ion memory buffer fd
+ * over unix domain socket IPC that can be exported by ionapp_export.
+ * This acts like a client for ionapp_export.
+ *
+ * Copyright (C) 2017 Pintu Kumar <pintu.ping@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include "ionutils.h"
+#include "ipcsocket.h"
+
+
+int main(void)
+{
+       int ret, status;
+       int sockfd, shared_fd;
+       unsigned char *map_buf;
+       unsigned long map_len;
+       struct ion_buffer_info info;
+       struct socket_info skinfo;
+
+       /* This is the client part. Here 0 means client or importer */
+       status = opensocket(&sockfd, SOCKET_NAME, 0);
+       if (status < 0) {
+               fprintf(stderr, "No exporter exists...\n");
+               ret = status;
+               goto err_socket;
+       }
+
+       skinfo.sockfd = sockfd;
+
+       ret = socket_receive_fd(&skinfo);
+       if (ret < 0) {
+               fprintf(stderr, "Failed: socket_receive_fd\n");
+               goto err_recv;
+       }
+
+       shared_fd = skinfo.datafd;
+       printf("Received buffer fd: %d\n", shared_fd);
+       if (shared_fd <= 0) {
+               fprintf(stderr, "ERROR: improper buf fd\n");
+               ret = -1;
+               goto err_fd;
+       }
+
+       memset(&info, 0, sizeof(info));
+       info.buffd = shared_fd;
+       info.buflen = ION_BUFFER_LEN;
+
+       ret = ion_import_buffer_fd(&info);
+       if (ret < 0) {
+               fprintf(stderr, "Failed: ion_import_buffer_fd\n");
+               goto err_import;
+       }
+
+       map_buf = info.buffer;
+       map_len = info.buflen;
+       read_buffer(map_buf, map_len);
+
+       /* Write probably new data to the same buffer again */
+       map_len = ION_BUFFER_LEN;
+       write_buffer(map_buf, map_len);
+
+err_import:
+       ion_close_buffer_fd(&info);
+err_fd:
+err_recv:
+err_socket:
+       closesocket(sockfd, SOCKET_NAME);
+
+       return ret;
+}
diff --git a/tools/testing/selftests/android/ion/ionutils.c b/tools/testing/selftests/android/ion/ionutils.c
new file mode 100644 (file)
index 0000000..ce69c14
--- /dev/null
@@ -0,0 +1,259 @@
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+//#include <stdint.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include "ionutils.h"
+#include "ipcsocket.h"
+
+
+void write_buffer(void *buffer, unsigned long len)
+{
+       int i;
+       unsigned char *ptr = (unsigned char *)buffer;
+
+       if (!ptr) {
+               fprintf(stderr, "<%s>: Invalid buffer...\n", __func__);
+               return;
+       }
+
+       printf("Fill buffer content:\n");
+       memset(ptr, 0xfd, len);
+       for (i = 0; i < len; i++)
+               printf("0x%x ", ptr[i]);
+       printf("\n");
+}
+
+void read_buffer(void *buffer, unsigned long len)
+{
+       int i;
+       unsigned char *ptr = (unsigned char *)buffer;
+
+       if (!ptr) {
+               fprintf(stderr, "<%s>: Invalid buffer...\n", __func__);
+               return;
+       }
+
+       printf("Read buffer content:\n");
+       for (i = 0; i < len; i++)
+               printf("0x%x ", ptr[i]);
+       printf("\n");
+}
+
+int ion_export_buffer_fd(struct ion_buffer_info *ion_info)
+{
+       int i, ret, ionfd, buffer_fd;
+       unsigned int heap_id;
+       unsigned long maplen;
+       unsigned char *map_buffer;
+       struct ion_allocation_data alloc_data;
+       struct ion_heap_query query;
+       struct ion_heap_data heap_data[MAX_HEAP_COUNT];
+
+       if (!ion_info) {
+               fprintf(stderr, "<%s>: Invalid ion info\n", __func__);
+               return -1;
+       }
+
+       /* Create an ION client */
+       ionfd = open(ION_DEVICE, O_RDWR);
+       if (ionfd < 0) {
+               fprintf(stderr, "<%s>: Failed to open ion client: %s\n",
+                       __func__, strerror(errno));
+               return -1;
+       }
+
+       memset(&query, 0, sizeof(query));
+       query.cnt = MAX_HEAP_COUNT;
+       query.heaps = (unsigned long int)&heap_data[0];
+       /* Query ION heap_id_mask from ION heap */
+       ret = ioctl(ionfd, ION_IOC_HEAP_QUERY, &query);
+       if (ret < 0) {
+               fprintf(stderr, "<%s>: Failed: ION_IOC_HEAP_QUERY: %s\n",
+                       __func__, strerror(errno));
+               goto err_query;
+       }
+
+       heap_id = MAX_HEAP_COUNT + 1;
+       for (i = 0; i < query.cnt; i++) {
+               if (heap_data[i].type == ion_info->heap_type) {
+                       printf("--------------------------------------\n");
+                       printf("heap type: %d\n", heap_data[i].type);
+                       printf("  heap id: %d\n", heap_data[i].heap_id);
+                       printf("heap name: %s\n", heap_data[i].name);
+                       printf("--------------------------------------\n");
+                       heap_id = heap_data[i].heap_id;
+                       break;
+               }
+       }
+
+       if (heap_id > MAX_HEAP_COUNT) {
+               fprintf(stderr, "<%s>: ERROR: heap type does not exist\n",
+                       __func__);
+               goto err_heap;
+       }
+
+       alloc_data.len = ion_info->heap_size;
+       alloc_data.heap_id_mask = 1 << heap_id;
+       alloc_data.flags = ion_info->flag_type;
+
+       /* Allocate memory for this ION client as per heap_type */
+       ret = ioctl(ionfd, ION_IOC_ALLOC, &alloc_data);
+       if (ret < 0) {
+               fprintf(stderr, "<%s>: Failed: ION_IOC_ALLOC: %s\n",
+                       __func__, strerror(errno));
+               goto err_alloc;
+       }
+
+       /* This will return a valid buffer fd */
+       buffer_fd = alloc_data.fd;
+       maplen = alloc_data.len;
+
+       if (buffer_fd < 0 || maplen <= 0) {
+               fprintf(stderr, "<%s>: Invalid map data, fd: %d, len: %ld\n",
+                       __func__, buffer_fd, maplen);
+               goto err_fd_data;
+       }
+
+       /* Create memory mapped buffer for the buffer fd */
+       map_buffer = (unsigned char *)mmap(NULL, maplen, PROT_READ|PROT_WRITE,
+                       MAP_SHARED, buffer_fd, 0);
+       if (map_buffer == MAP_FAILED) {
+               fprintf(stderr, "<%s>: Failed: mmap: %s\n",
+                       __func__, strerror(errno));
+               goto err_mmap;
+       }
+
+       ion_info->ionfd = ionfd;
+       ion_info->buffd = buffer_fd;
+       ion_info->buffer = map_buffer;
+       ion_info->buflen = maplen;
+
+       return 0;
+
+       munmap(map_buffer, maplen);
+
+err_fd_data:
+err_mmap:
+       /* in case of error: close the buffer fd */
+       if (buffer_fd)
+               close(buffer_fd);
+
+err_query:
+err_heap:
+err_alloc:
+       /* In case of error: close the ion client fd */
+       if (ionfd)
+               close(ionfd);
+
+       return -1;
+}
+
+int ion_import_buffer_fd(struct ion_buffer_info *ion_info)
+{
+       int buffd;
+       unsigned char *map_buf;
+       unsigned long map_len;
+
+       if (!ion_info) {
+               fprintf(stderr, "<%s>: Invalid ion info\n", __func__);
+               return -1;
+       }
+
+       map_len = ion_info->buflen;
+       buffd = ion_info->buffd;
+
+       if (buffd < 0 || map_len <= 0) {
+               fprintf(stderr, "<%s>: Invalid map data, fd: %d, len: %ld\n",
+                       __func__, buffd, map_len);
+               goto err_buffd;
+       }
+
+       map_buf = (unsigned char *)mmap(NULL, map_len, PROT_READ|PROT_WRITE,
+                       MAP_SHARED, buffd, 0);
+       if (map_buf == MAP_FAILED) {
+               printf("<%s>: Failed - mmap: %s\n",
+                       __func__, strerror(errno));
+               goto err_mmap;
+       }
+
+       ion_info->buffer = map_buf;
+       ion_info->buflen = map_len;
+
+       return 0;
+
+err_mmap:
+       if (buffd)
+               close(buffd);
+
+err_buffd:
+       return -1;
+}
+
+void ion_close_buffer_fd(struct ion_buffer_info *ion_info)
+{
+       if (ion_info) {
+               /* unmap the buffer properly in the end */
+               munmap(ion_info->buffer, ion_info->buflen);
+               /* close the buffer fd */
+               if (ion_info->buffd > 0)
+                       close(ion_info->buffd);
+               /* Finally, close the client fd */
+               if (ion_info->ionfd > 0)
+                       close(ion_info->ionfd);
+               printf("<%s>: buffer release successfully....\n", __func__);
+       }
+}
+
+int socket_send_fd(struct socket_info *info)
+{
+       int status;
+       int fd, sockfd;
+       struct socketdata skdata;
+
+       if (!info) {
+               fprintf(stderr, "<%s>: Invalid socket info\n", __func__);
+               return -1;
+       }
+
+       sockfd = info->sockfd;
+       fd = info->datafd;
+       memset(&skdata, 0, sizeof(skdata));
+       skdata.data = fd;
+       skdata.len = sizeof(skdata.data);
+       status = sendtosocket(sockfd, &skdata);
+       if (status < 0) {
+               fprintf(stderr, "<%s>: Failed: sendtosocket\n", __func__);
+               return -1;
+       }
+
+       return 0;
+}
+
+int socket_receive_fd(struct socket_info *info)
+{
+       int status;
+       int fd, sockfd;
+       struct socketdata skdata;
+
+       if (!info) {
+               fprintf(stderr, "<%s>: Invalid socket info\n", __func__);
+               return -1;
+       }
+
+       sockfd = info->sockfd;
+       memset(&skdata, 0, sizeof(skdata));
+       status = receivefromsocket(sockfd, &skdata);
+       if (status < 0) {
+               fprintf(stderr, "<%s>: Failed: receivefromsocket\n", __func__);
+               return -1;
+       }
+
+       fd = (int)skdata.data;
+       info->datafd = fd;
+
+       return status;
+}
diff --git a/tools/testing/selftests/android/ion/ionutils.h b/tools/testing/selftests/android/ion/ionutils.h
new file mode 100644 (file)
index 0000000..9941eb8
--- /dev/null
@@ -0,0 +1,55 @@
+#ifndef __ION_UTILS_H
+#define __ION_UTILS_H
+
+#include "ion.h"
+
+#define SOCKET_NAME "ion_socket"
+#define ION_DEVICE "/dev/ion"
+
+#define ION_BUFFER_LEN 4096
+#define MAX_HEAP_COUNT ION_HEAP_TYPE_CUSTOM
+
+struct socket_info {
+       int sockfd;
+       int datafd;
+       unsigned long buflen;
+};
+
+struct ion_buffer_info {
+       int ionfd;
+       int buffd;
+       unsigned int heap_type;
+       unsigned int flag_type;
+       unsigned long heap_size;
+       unsigned long buflen;
+       unsigned char *buffer;
+};
+
+
+/* This is used to fill the data into the mapped buffer */
+void write_buffer(void *buffer, unsigned long len);
+
+/* This is used to read the data from the exported buffer */
+void read_buffer(void *buffer, unsigned long len);
+
+/* This is used to create an ION buffer FD for the kernel buffer,
+ * so the same buffer can be exported to other processes as an FD.
+ */
+int ion_export_buffer_fd(struct ion_buffer_info *ion_info);
+
+/* This is used to import (map) an exported FD,
+ * so both processes point to the same buffer without making a copy (zero-copy).
+ */
+int ion_import_buffer_fd(struct ion_buffer_info *ion_info);
+
+/* This is used to close all references for the ION client */
+void ion_close_buffer_fd(struct ion_buffer_info *ion_info);
+
+/* This is used to send FD to another process using socket IPC */
+int socket_send_fd(struct socket_info *skinfo);
+
+/* This is used to receive FD from another process using socket IPC */
+int socket_receive_fd(struct socket_info *skinfo);
+
+
+#endif
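Taken together, the helpers declared above are meant to be chained as allocate, fill, then share over a socket. Below is a minimal exporter-side sketch, assuming the ipcsocket helpers added later in this patch for the fd transport; the function name export_and_send() and the ION_HEAP_TYPE_SYSTEM/ION_BUFFER_LEN choices are illustrative only, and the selftest binaries added elsewhere in this patch remain the authoritative users of the API.

/* Hypothetical exporter sketch (not part of the patch) */
#include <string.h>
#include "ionutils.h"
#include "ipcsocket.h"

int export_and_send(void)
{
        struct ion_buffer_info info;
        struct socket_info skinfo;

        /* Allocate an ION buffer and mmap it into this process */
        memset(&info, 0, sizeof(info));
        info.heap_type = ION_HEAP_TYPE_SYSTEM;
        info.heap_size = ION_BUFFER_LEN;
        if (ion_export_buffer_fd(&info) < 0)
                return -1;

        /* Fill the shared buffer with a known pattern */
        write_buffer(info.buffer, info.buflen);

        /* Act as the server (connecttype 1) and pass the buffer fd across */
        memset(&skinfo, 0, sizeof(skinfo));
        if (opensocket(&skinfo.sockfd, SOCKET_NAME, 1) < 0)
                return -1;
        skinfo.datafd = info.buffd;
        skinfo.buflen = info.buflen;
        if (socket_send_fd(&skinfo) < 0)
                return -1;

        ion_close_buffer_fd(&info);
        return closesocket(skinfo.sockfd, SOCKET_NAME);
}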
diff --git a/tools/testing/selftests/android/ion/ipcsocket.c b/tools/testing/selftests/android/ion/ipcsocket.c
new file mode 100644 (file)
index 0000000..7dc5210
--- /dev/null
@@ -0,0 +1,227 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/un.h>
+#include <errno.h>
+
+#include "ipcsocket.h"
+
+
+int opensocket(int *sockfd, const char *name, int connecttype)
+{
+       int ret, temp = 1;
+
+       if (!name || strlen(name) > MAX_SOCK_NAME_LEN) {
+               fprintf(stderr, "<%s>: Invalid socket name.\n", __func__);
+               return -1;
+       }
+
+       ret = socket(PF_LOCAL, SOCK_STREAM, 0);
+       if (ret < 0) {
+               fprintf(stderr, "<%s>: Failed socket: <%s>\n",
+                       __func__, strerror(errno));
+               return ret;
+       }
+
+       *sockfd = ret;
+       if (setsockopt(*sockfd, SOL_SOCKET, SO_REUSEADDR,
+               (char *)&temp, sizeof(int)) < 0) {
+               fprintf(stderr, "<%s>: Failed setsockopt: <%s>\n",
+               __func__, strerror(errno));
+               goto err;
+       }
+
+       sprintf(sock_name, "/tmp/%s", name);
+
+       if (connecttype == 1) {
+               /* This is for Server connection */
+               struct sockaddr_un skaddr;
+               int clientfd;
+               socklen_t sklen;
+
+               unlink(sock_name);
+               memset(&skaddr, 0, sizeof(skaddr));
+               skaddr.sun_family = AF_LOCAL;
+               strcpy(skaddr.sun_path, sock_name);
+
+               ret = bind(*sockfd, (struct sockaddr *)&skaddr,
+                       SUN_LEN(&skaddr));
+               if (ret < 0) {
+                       fprintf(stderr, "<%s>: Failed bind: <%s>\n",
+                       __func__, strerror(errno));
+                       goto err;
+               }
+
+               ret = listen(*sockfd, 5);
+               if (ret < 0) {
+                       fprintf(stderr, "<%s>: Failed listen: <%s>\n",
+                       __func__, strerror(errno));
+                       goto err;
+               }
+
+               memset(&skaddr, 0, sizeof(skaddr));
+               sklen = sizeof(skaddr);
+
+               ret = accept(*sockfd, (struct sockaddr *)&skaddr,
+                       (socklen_t *)&sklen);
+               if (ret < 0) {
+                       fprintf(stderr, "<%s>: Failed accept: <%s>\n",
+                       __func__, strerror(errno));
+                       goto err;
+               }
+
+               clientfd = ret;
+               *sockfd = clientfd;
+       } else {
+               /* This is for client connection */
+               struct sockaddr_un skaddr;
+
+               memset(&skaddr, 0, sizeof(skaddr));
+               skaddr.sun_family = AF_LOCAL;
+               strcpy(skaddr.sun_path, sock_name);
+
+               ret = connect(*sockfd, (struct sockaddr *)&skaddr,
+                       SUN_LEN(&skaddr));
+               if (ret < 0) {
+                       fprintf(stderr, "<%s>: Failed connect: <%s>\n",
+                       __func__, strerror(errno));
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       if (*sockfd)
+               close(*sockfd);
+
+       return ret;
+}
+
+int sendtosocket(int sockfd, struct socketdata *skdata)
+{
+       int ret, buffd;
+       unsigned int len;
+       char cmsg_b[CMSG_SPACE(sizeof(int))];
+       struct cmsghdr *cmsg;
+       struct msghdr msgh;
+       struct iovec iov;
+       struct timeval timeout;
+       fd_set selFDs;
+
+       if (!skdata) {
+               fprintf(stderr, "<%s>: socketdata is NULL\n", __func__);
+               return -1;
+       }
+
+       FD_ZERO(&selFDs);
+       FD_SET(0, &selFDs);
+       FD_SET(sockfd, &selFDs);
+       timeout.tv_sec = 20;
+       timeout.tv_usec = 0;
+
+       ret = select(sockfd+1, NULL, &selFDs, NULL, &timeout);
+       if (ret < 0) {
+               fprintf(stderr, "<%s>: Failed select: <%s>\n",
+               __func__, strerror(errno));
+               return -1;
+       }
+
+       if (FD_ISSET(sockfd, &selFDs)) {
+               buffd = skdata->data;
+               len = skdata->len;
+               memset(&msgh, 0, sizeof(msgh));
+               msgh.msg_control = &cmsg_b;
+               msgh.msg_controllen = CMSG_LEN(len);
+               iov.iov_base = "OK";
+               iov.iov_len = 2;
+               msgh.msg_iov = &iov;
+               msgh.msg_iovlen = 1;
+               cmsg = CMSG_FIRSTHDR(&msgh);
+               cmsg->cmsg_level = SOL_SOCKET;
+               cmsg->cmsg_type = SCM_RIGHTS;
+               cmsg->cmsg_len = CMSG_LEN(len);
+               memcpy(CMSG_DATA(cmsg), &buffd, len);
+
+               ret = sendmsg(sockfd, &msgh, MSG_DONTWAIT);
+               if (ret < 0) {
+                       fprintf(stderr, "<%s>: Failed sendmsg: <%s>\n",
+                       __func__, strerror(errno));
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+int receivefromsocket(int sockfd, struct socketdata *skdata)
+{
+       int ret, buffd;
+       unsigned int len = 0;
+       char cmsg_b[CMSG_SPACE(sizeof(int))];
+       struct cmsghdr *cmsg;
+       struct msghdr msgh;
+       struct iovec iov;
+       fd_set recvFDs;
+       char data[32];
+
+       if (!skdata) {
+               fprintf(stderr, "<%s>: socketdata is NULL\n", __func__);
+               return -1;
+       }
+
+       FD_ZERO(&recvFDs);
+       FD_SET(0, &recvFDs);
+       FD_SET(sockfd, &recvFDs);
+
+       ret = select(sockfd+1, &recvFDs, NULL, NULL, NULL);
+       if (ret < 0) {
+               fprintf(stderr, "<%s>: Failed select: <%s>\n",
+               __func__, strerror(errno));
+               return -1;
+       }
+
+       if (FD_ISSET(sockfd, &recvFDs)) {
+               len = sizeof(buffd);
+               memset(&msgh, 0, sizeof(msgh));
+               msgh.msg_control = &cmsg_b;
+               msgh.msg_controllen = CMSG_LEN(len);
+               iov.iov_base = data;
+               iov.iov_len = sizeof(data)-1;
+               msgh.msg_iov = &iov;
+               msgh.msg_iovlen = 1;
+               cmsg = CMSG_FIRSTHDR(&msgh);
+               cmsg->cmsg_level = SOL_SOCKET;
+               cmsg->cmsg_type = SCM_RIGHTS;
+               cmsg->cmsg_len = CMSG_LEN(len);
+
+               ret = recvmsg(sockfd, &msgh, MSG_DONTWAIT);
+               if (ret < 0) {
+                       fprintf(stderr, "<%s>: Failed recvmsg: <%s>\n",
+                       __func__, strerror(errno));
+                       return -1;
+               }
+
+               memcpy(&buffd, CMSG_DATA(cmsg), len);
+               skdata->data = buffd;
+               skdata->len = len;
+       }
+       return 0;
+}
+
+int closesocket(int sockfd, char *name)
+{
+       char sockname[MAX_SOCK_NAME_LEN];
+
+       if (sockfd)
+               close(sockfd);
+       sprintf(sockname, "/tmp/%s", name);
+       unlink(sockname);
+       shutdown(sockfd, 2);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/android/ion/ipcsocket.h b/tools/testing/selftests/android/ion/ipcsocket.h
new file mode 100644 (file)
index 0000000..b3e8449
--- /dev/null
@@ -0,0 +1,35 @@
+
+#ifndef _IPCSOCKET_H
+#define _IPCSOCKET_H
+
+
+#define MAX_SOCK_NAME_LEN      64
+
+char sock_name[MAX_SOCK_NAME_LEN];
+
+/* This structure holds the IPC payload:
+ * data: the buffer fd being passed
+ * len: the length of the fd (a 32-bit integer)
+ */
+struct socketdata {
+       int data;
+       unsigned int len;
+};
+
+/* This API is used to open the IPC socket connection
+ * name: a unique socket name in the system
+ * connecttype: 1 for a server (bind/listen/accept), 0 for a client (connect)
+ */
+int opensocket(int *sockfd, const char *name, int connecttype);
+
+/* This is the API to send socket data over IPC socket */
+int sendtosocket(int sockfd, struct socketdata *data);
+
+/* This is the API to receive socket data over IPC socket */
+int receivefromsocket(int sockfd, struct socketdata *data);
+
+/* This is the API to close the socket connection */
+int closesocket(int sockfd, char *name);
+
+
+#endif
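The importing side mirrors the exporter sketch shown earlier: connect as a client, receive the fd, map it, read, and release. Again this is only a sketch under the same assumptions; receive_and_map() is a hypothetical name and the buffer length is simply assumed to be ION_BUFFER_LEN on both ends.

/* Hypothetical importer sketch (not part of the patch) */
#include <string.h>
#include "ionutils.h"
#include "ipcsocket.h"

int receive_and_map(void)
{
        struct socket_info skinfo;
        struct ion_buffer_info info;

        /* Act as the client (connecttype 0) and wait for the buffer fd */
        memset(&skinfo, 0, sizeof(skinfo));
        if (opensocket(&skinfo.sockfd, SOCKET_NAME, 0) < 0)
                return -1;
        if (socket_receive_fd(&skinfo) < 0)
                return -1;

        /* Map the received fd: this is the zero-copy import path */
        memset(&info, 0, sizeof(info));
        info.buffd = skinfo.datafd;
        info.buflen = ION_BUFFER_LEN;
        if (ion_import_buffer_fd(&info) < 0)
                return -1;

        read_buffer(info.buffer, info.buflen);

        ion_close_buffer_fd(&info);
        return closesocket(skinfo.sockfd, SOCKET_NAME);
}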
diff --git a/tools/testing/selftests/android/run.sh b/tools/testing/selftests/android/run.sh
new file mode 100755 (executable)
index 0000000..dd8edf2
--- /dev/null
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+(cd ion; ./ion_test.sh)
index bf092b83e45382e063fbfadf5ae5c84613fdd9f5..3c64f30cf63cc2b6adb532a3b1f3201533193f7f 100644 (file)
@@ -4377,11 +4377,10 @@ static struct bpf_test tests[] = {
                        BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
                        BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-                       BPF_MOV64_IMM(BPF_REG_1, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
-                       BPF_MOV64_IMM(BPF_REG_3, 0),
-                       BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_trace_printk),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
@@ -4481,14 +4480,12 @@ static struct bpf_test tests[] = {
                        BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
                        BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                                offsetof(struct test_val, foo)),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
-                       BPF_MOV64_IMM(BPF_REG_1, 0),
-                       BPF_MOV64_IMM(BPF_REG_3, 0),
-                       BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_trace_printk),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
@@ -4618,18 +4615,16 @@ static struct bpf_test tests[] = {
                        BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
                        BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
-                       BPF_MOV64_IMM(BPF_REG_1, 0),
-                       BPF_MOV64_IMM(BPF_REG_3, 0),
-                       BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_trace_printk),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr = "R2 min value is outside of the array range",
+               .errstr = "R1 min value is outside of the array range",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
@@ -4760,20 +4755,18 @@ static struct bpf_test tests[] = {
                        BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
                        BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
                        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
                        BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
-                               offsetof(struct test_val, foo), 4),
+                               offsetof(struct test_val, foo), 3),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
-                       BPF_MOV64_IMM(BPF_REG_1, 0),
-                       BPF_MOV64_IMM(BPF_REG_3, 0),
-                       BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_trace_printk),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr = "R2 min value is outside of the array range",
+               .errstr = "R1 min value is outside of the array range",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
@@ -5638,7 +5631,7 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "helper access to variable memory: size = 0 allowed on NULL",
+               "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_1, 0),
                        BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -5652,7 +5645,7 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "helper access to variable memory: size > 0 not allowed on NULL",
+               "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_1, 0),
                        BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -5670,7 +5663,7 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "helper access to variable memory: size = 0 allowed on != NULL stack pointer",
+               "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
@@ -5687,7 +5680,7 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "helper access to variable memory: size = 0 allowed on != NULL map pointer",
+               "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
                .insns = {
                        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5709,7 +5702,7 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer",
+               "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
                .insns = {
                        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5734,7 +5727,7 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "helper access to variable memory: size possible = 0 allowed on != NULL map pointer",
+               "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
                .insns = {
                        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -5757,7 +5750,7 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer",
+               "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
                .insns = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
@@ -5778,6 +5771,105 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 type=inv expected=fp",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R1 type=inv expected=fp",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
        {
                "helper access to variable memory: 8 bytes leak",
                .insns = {
index 960d02100c26ebbf6214cde9965ba787817fd2c2..2d95e5adde726fb388b26de8e0db87343110d036 100644 (file)
@@ -19,6 +19,7 @@
 
 #define _GNU_SOURCE
 
+#include <asm/ptrace.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/ptrace.h>
index e6ab090cfbf3a708eb202927df02e8e3057407c7..d4aca2ad5069bf5db35f7f32b9643cce34b88e43 100644 (file)
@@ -1,2 +1 @@
 CONFIG_NOTIFIER_ERROR_INJECTION=y
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
index 8d5d1d2ee7c1d793405685e8266045363bcce960..67cd4597db2b78e7fb29b9121c8014afbce4b648 100644 (file)
@@ -147,7 +147,7 @@ static void exe_cp(const char *src, const char *dest)
 }
 
 #define XX_DIR_LEN 200
-static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
+static int check_execveat_pathmax(int root_dfd, const char *src, int is_script)
 {
        int fail = 0;
        int ii, count, len;
@@ -156,20 +156,30 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
 
        if (*longpath == '\0') {
                /* Create a filename close to PATH_MAX in length */
+               char *cwd = getcwd(NULL, 0);
+
+               if (!cwd) {
+                       printf("Failed to getcwd(), errno=%d (%s)\n",
+                              errno, strerror(errno));
+                       return 2;
+               }
+               strcpy(longpath, cwd);
+               strcat(longpath, "/");
                memset(longname, 'x', XX_DIR_LEN - 1);
                longname[XX_DIR_LEN - 1] = '/';
                longname[XX_DIR_LEN] = '\0';
-               count = (PATH_MAX - 3) / XX_DIR_LEN;
+               count = (PATH_MAX - 3 - strlen(cwd)) / XX_DIR_LEN;
                for (ii = 0; ii < count; ii++) {
                        strcat(longpath, longname);
                        mkdir(longpath, 0755);
                }
-               len = (PATH_MAX - 3) - (count * XX_DIR_LEN);
+               len = (PATH_MAX - 3 - strlen(cwd)) - (count * XX_DIR_LEN);
                if (len <= 0)
                        len = 1;
                memset(longname, 'y', len);
                longname[len] = '\0';
                strcat(longpath, longname);
+               free(cwd);
        }
        exe_cp(src, longpath);
 
@@ -190,7 +200,7 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
        }
 
        /*
-        * Execute as a long pathname relative to ".".  If this is a script,
+        * Execute as a long pathname relative to "/".  If this is a script,
         * the interpreter will launch but fail to open the script because its
         * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
         *
@@ -200,10 +210,10 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
         * the exit status shall be 126."), so allow either.
         */
        if (is_script)
-               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
+               fail += check_execveat_invoked_rc(root_dfd, longpath + 1, 0,
                                                  127, 126);
        else
-               fail += check_execveat(dot_dfd, longpath, 0);
+               fail += check_execveat(root_dfd, longpath + 1, 0);
 
        return fail;
 }
@@ -218,6 +228,7 @@ static int run_tests(void)
        int subdir_dfd_ephemeral = open_or_die("subdir.ephemeral",
                                               O_DIRECTORY|O_RDONLY);
        int dot_dfd = open_or_die(".", O_DIRECTORY|O_RDONLY);
+       int root_dfd = open_or_die("/", O_DIRECTORY|O_RDONLY);
        int dot_dfd_path = open_or_die(".", O_DIRECTORY|O_RDONLY|O_PATH);
        int dot_dfd_cloexec = open_or_die(".", O_DIRECTORY|O_RDONLY|O_CLOEXEC);
        int fd = open_or_die("execveat", O_RDONLY);
@@ -353,8 +364,8 @@ static int run_tests(void)
        /* Attempt to execute relative to non-directory => ENOTDIR */
        fail += check_execveat_fail(fd, "execveat", 0, ENOTDIR);
 
-       fail += check_execveat_pathmax(dot_dfd, "execveat", 0);
-       fail += check_execveat_pathmax(dot_dfd, "script", 1);
+       fail += check_execveat_pathmax(root_dfd, "execveat", 0);
+       fail += check_execveat_pathmax(root_dfd, "script", 1);
        return fail;
 }
 
index a52a3bab532b95c277f2954209e4493061a41e4e..34a42c68ebfb81235738c0d5afc9723ebd068f21 100755 (executable)
@@ -86,6 +86,11 @@ load_fw_cancel()
 
 load_fw_custom()
 {
+       if [ ! -e "$DIR"/trigger_custom_fallback ]; then
+               echo "$0: custom fallback trigger not present, ignoring test" >&2
+               return 1
+       fi
+
        local name="$1"
        local file="$2"
 
@@ -108,11 +113,17 @@ load_fw_custom()
 
        # Wait for request to finish.
        wait
+       return 0
 }
 
 
 load_fw_custom_cancel()
 {
+       if [ ! -e "$DIR"/trigger_custom_fallback ]; then
+               echo "$0: canceling custom fallback trigger not present, ignoring test" >&2
+               return 1
+       fi
+
        local name="$1"
        local file="$2"
 
@@ -133,6 +144,7 @@ load_fw_custom_cancel()
 
        # Wait for request to finish.
        wait
+       return 0
 }
 
 load_fw_fallback_with_child()
@@ -227,20 +239,22 @@ else
        echo "$0: cancelling fallback mechanism works"
 fi
 
-load_fw_custom "$NAME" "$FW"
-if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
-       echo "$0: firmware was not loaded" >&2
-       exit 1
-else
-       echo "$0: custom fallback loading mechanism works"
+if load_fw_custom "$NAME" "$FW" ; then
+       if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+               echo "$0: firmware was not loaded" >&2
+               exit 1
+       else
+               echo "$0: custom fallback loading mechanism works"
+       fi
 fi
 
-load_fw_custom_cancel "nope-$NAME" "$FW"
-if diff -q "$FW" /dev/test_firmware >/dev/null ; then
-       echo "$0: firmware was expected to be cancelled" >&2
-       exit 1
-else
-       echo "$0: cancelling custom fallback mechanism works"
+if load_fw_custom_cancel "nope-$NAME" "$FW" ; then
+       if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+               echo "$0: firmware was expected to be cancelled" >&2
+               exit 1
+       else
+               echo "$0: cancelling custom fallback mechanism works"
+       fi
 fi
 
 set +e
index 62f2d6f54929a4260a767e06d0b08161c55b7437..b1f20fef36c77444d3de0730dfb73c223060ac56 100755 (executable)
@@ -70,9 +70,13 @@ if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then
        exit 1
 fi
 
-if printf '\000' >"$DIR"/trigger_async_request 2> /dev/null; then
-       echo "$0: empty filename should not succeed (async)" >&2
-       exit 1
+if [ ! -e "$DIR"/trigger_async_request ]; then
+       echo "$0: empty filename: async trigger not present, ignoring test" >&2
+else
+       if printf '\000' >"$DIR"/trigger_async_request 2> /dev/null; then
+               echo "$0: empty filename should not succeed (async)" >&2
+               exit 1
+       fi
 fi
 
 # Request a firmware that doesn't exist, it should fail.
@@ -105,17 +109,21 @@ else
 fi
 
 # Try the asynchronous version too
-if ! echo -n "$NAME" >"$DIR"/trigger_async_request ; then
-       echo "$0: could not trigger async request" >&2
-       exit 1
-fi
-
-# Verify the contents are what we expect.
-if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
-       echo "$0: firmware was not loaded (async)" >&2
-       exit 1
+if [ ! -e "$DIR"/trigger_async_request ]; then
+       echo "$0: firmware loading: async trigger not present, ignoring test" >&2
 else
-       echo "$0: async filesystem loading works"
+       if ! echo -n "$NAME" >"$DIR"/trigger_async_request ; then
+               echo "$0: could not trigger async request" >&2
+               exit 1
+       fi
+
+       # Verify the contents are what we expect.
+       if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+               echo "$0: firmware was not loaded (async)" >&2
+               exit 1
+       else
+               echo "$0: async filesystem loading works"
+       fi
 fi
 
 ### Batched requests tests
index 8a1c9f949fe082889aa4057ed1886c16249ef2b9..b01924c71c097c1aeb2c48c8497033266896e876 100644 (file)
@@ -1,2 +1,6 @@
 CONFIG_KPROBES=y
 CONFIG_FTRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_STACK_TRACER=y
+CONFIG_HIST_TRIGGERS=y
index abc706cf7702629e8b8ce1f12d352cd362e797cf..f9a9d424c980cc932f527904b5425786cde567dc 100755 (executable)
@@ -222,7 +222,14 @@ SIG_RESULT=
 SIG_BASE=36    # Use realtime signals
 SIG_PID=$$
 
+exit_pass () {
+  exit 0
+}
+
 SIG_FAIL=$((SIG_BASE + FAIL))
+exit_fail () {
+  exit 1
+}
 trap 'SIG_RESULT=$FAIL' $SIG_FAIL
 
 SIG_UNRESOLVED=$((SIG_BASE + UNRESOLVED))
index aa51f6c17359e18de0823f125ed7239b5f29c841..0696098d6408ae13eed6e84b0160868e6b3adfde 100644 (file)
@@ -2,4 +2,4 @@
 # description: Basic event tracing check
 test -f available_events -a -f set_event -a -d events
 # check scheduler events are available
-grep -q sched available_events && exit 0 || exit $FAIL
+grep -q sched available_events && exit_pass || exit_fail
index 6ff851a7588461581ea6040987f056d2233b43b9..9daf034186f59a7215a0060af14634112ba42bfd 100644 (file)
@@ -11,7 +11,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 yield() {
index cc14feec6e1f2dd96a2f0aabf6ad02d45e00e069..132478b305c23ea11d042d2f2240e21d1b4eeda6 100644 (file)
@@ -13,7 +13,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 yield() {
index 85094904aa79b3aa2d2e720ba4bc1a6ba763a5aa..6a37a8642ee6ef7bee7326cabb7bc56c8dc77d2c 100644 (file)
@@ -11,7 +11,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 yield() {
index cc1cf4d30ef5d3729822a8d068c8df2e5d52e49c..4e9b6e2c02198bc82e7f2c4ddb5ddffd99408359 100644 (file)
@@ -10,7 +10,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 yield() {
index 45df747887e07254dd5d1e7b056b7202ccc69530..1aec99d108eb9c8dfe85d192b1673ec501177fa8 100644 (file)
@@ -28,7 +28,7 @@ do_reset() {
 fail() { # msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 disable_tracing
index 0387e22e757757894fe1a553bb6bd0a41e538a67..9f8d27ca39cf5b8807005c43b28a3bc70fde251c 100644 (file)
@@ -18,7 +18,7 @@ do_reset() {
 fail() { # msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 disable_tracing
index 78524fcc25aee94ec2414fbc53a668d7ab09fdcf..524ce24b3c22010c1908f4dbb50a8035f3087520 100644 (file)
@@ -51,7 +51,7 @@ do_reset() {
 fail() { # msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 yield() {
index 9d4afcca1e36aae049308e9faf7fcab6c05876e0..6fed4cf2db81d420e43c8c7d8b159a8e3fd3ea17 100644 (file)
@@ -27,7 +27,7 @@ do_reset() {
 fail() { # mesg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 SLEEP_TIME=".1"
@@ -48,8 +48,7 @@ test_event_enabled() {
 
     e=`cat $EVENT_ENABLE`
     if [ "$e" != $val ]; then
-       echo "Expected $val but found $e"
-       exit 1
+       fail "Expected $val but found $e"
     fi
 }
 
index fe0dc5a7ea2685d7288c15e224cd5d2e3c85d2c8..b2d5a8febfe86d1be803d0f32ea2d3b1adecaf89 100644 (file)
@@ -32,7 +32,7 @@ fail() { # mesg
     reset_tracer
     echo > set_ftrace_filter
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 echo "Testing function tracer with profiler:"
index 5ad723724adb9c72d5a94dfc373eb095e5e76943..0f3f92622e3352de7bcf7d18d7100e962e47b13b 100644 (file)
@@ -26,14 +26,14 @@ do_reset() {
 fail() { # mesg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 do_reset
 
 FILTER=set_ftrace_filter
 FUNC1="schedule"
-FUNC2="do_IRQ"
+FUNC2="do_softirq"
 
 ALL_FUNCS="#### all functions enabled ####"
 
index cdc92a371cd73f78d22c964c2afac13201108597..f6d9ac73268aece207c476a1995997e4c3895950 100644 (file)
@@ -27,7 +27,7 @@ do_reset() {
 fail() { # mesg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 SLEEP_TIME=".1"
index d7f48b55df51cb1c4e1f13f1d67b6af9272dcc96..4fa0f79144f4abbf96ea294443a4c726d6296ee1 100644 (file)
@@ -11,7 +11,7 @@ fail() { # mesg
     rmdir foo 2>/dev/null
     echo $1
     set -e
-    exit $FAIL
+    exit_fail
 }
 
 cd instances
index ddda622033664a50af8556df1d50c863f07f5866..b84651283bf3412e36e86b25ac578edbee2d8a08 100644 (file)
@@ -11,7 +11,7 @@ fail() { # mesg
     rmdir x y z 2>/dev/null
     echo $1
     set -e
-    exit $FAIL
+    exit_fail
 }
 
 cd instances
index 0e6f415c6152af1aa97f0a1e8413113c952a7c8d..bbc443a9190c9d2fa29512980850279dcdce2f4c 100644 (file)
@@ -9,7 +9,7 @@ echo > kprobe_events
 echo p:myevent _do_fork > kprobe_events
 test -d events/kprobes/myevent
 echo 1 > events/kprobes/myevent/enable
-echo > kprobe_events && exit 1 # this must fail
+echo > kprobe_events && exit_fail # this must fail
 echo 0 > events/kprobes/myevent/enable
 echo > kprobe_events # this must succeed
 clear_trace
index 679bbd23bcc3fe833f1359b7c8b5827efc4ae89d..8b43c6804fc33e384e65a7bb93df2ea2263d0790 100644 (file)
@@ -14,5 +14,5 @@ echo 1 > events/kprobes/testprobe/enable
 echo 0 > events/kprobes/testprobe/enable
 echo "-:testprobe" >> kprobe_events
 clear_trace
-test -d events/kprobes/testprobe && exit 1 || exit 0
+test -d events/kprobes/testprobe && exit_fail || exit_pass
 
index 17d33ba192f64ad1784802569af54fba5490baa4..2a1755bfc2900ef7f709a5b8e3434418d4f22d2c 100644 (file)
@@ -35,4 +35,4 @@ check_types $ARGS
 
 echo "-:testprobe" >> kprobe_events
 clear_trace
-test -d events/kprobes/testprobe && exit 1 || exit 0
+test -d events/kprobes/testprobe && exit_fail || exit_pass
index f1825bdbe3f30776e24415f1992ee203666350b1..321954683aaa9ed166406ccde15e69eed126d266 100644 (file)
@@ -14,4 +14,4 @@ echo 1 > events/kprobes/testprobe2/enable
 echo 0 > events/kprobes/testprobe2/enable
 echo '-:testprobe2' >> kprobe_events
 clear_trace
-test -d events/kprobes/testprobe2 && exit 1 || exit 0
+test -d events/kprobes/testprobe2 && exit_fail || exit_pass
index 5448f7abad5f3763b7ca06dbcc01bb416035eaeb..5c39ceb18a0d6b3efe3dd65f654b4eaa4ce322e0 100644 (file)
@@ -4,6 +4,7 @@
 # Note that all tests are run with "errexit" option.
 
 exit 0 # Return 0 if the test is passed, otherwise return !0
+# Or you can call exit_pass for a passed test, and exit_fail for a failed test.
 # If the test could not run because of lack of feature, call exit_unsupported
 # If the test returned unclear results, call exit_unresolved
 # If the test is a dummy, or a placeholder, call exit_untested
index 839ac4320b24e7a4ca171f7c70076b6a9d4268e6..28cc355a3a7b23181dc9fecb74f7a2cdba56e615 100644 (file)
@@ -12,7 +12,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 if [ ! -f set_event -o ! -d events/sched ]; then
index 66873c4b12c952503643c92ee580d9929f4623db..a48e23eb8a8be85a81324aca2aee51fbfa352ff5 100644 (file)
@@ -12,7 +12,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 if [ ! -f set_event -o ! -d events/sched ]; then
index 4237b32769f14552c9f0ca9ee16357c95fc52345..8da80efc44d88d741d1cab1e837ce8a4d80d8233 100644 (file)
@@ -12,7 +12,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 if [ ! -f set_event -o ! -d events/sched ]; then
index d24e2b8bd8633b8ec2a8344b044928d49c7f41dc..449fe9ff91a250ed6bc171f30e01d44c8a58ccde 100644 (file)
@@ -12,7 +12,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 if [ ! -f set_event -o ! -d events/sched ]; then
index 4c0774fff37815e20803b4f8d7acaba87af49621..c5ef8b9d02b3b8cfb8a360b0d15b332a64f0b564 100644 (file)
@@ -12,7 +12,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 if [ ! -f set_event -o ! -d events/sched ]; then
index 3fc6321e081f327798b08c09d7ff21bcd2693545..ed38f0050d7721c88f494b389bd1a1c63e5a58a0 100644 (file)
@@ -11,7 +11,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 if [ ! -f set_event -o ! -d events/sched ]; then
index 3652824f81ed6a9766e2f45ef44feee0a79c38ed..3121d795a868ce3e1ed87b02cf513c22b2cd523b 100644 (file)
@@ -11,7 +11,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 if [ ! -f set_event -o ! -d events/sched ]; then
index 6d9051cdf408f32449602cafb6c8c60ca708abe9..c59d9eb546daed21c889cbdc96a455aa0b149b3d 100644 (file)
@@ -11,7 +11,7 @@ do_reset() {
 fail() { #msg
     do_reset
     echo $1
-    exit $FAIL
+    exit_fail
 }
 
 if [ ! -f set_event -o ! -d events/sched ]; then
index 845e5f67b6f0245fabedc8367ecefb3ba3ad0de9..132a54f74e883963bf45a42cfc17533a306e8bd6 100644 (file)
@@ -515,7 +515,7 @@ static void mfd_assert_grow_write(int fd)
 
        buf = malloc(mfd_def_size * 8);
        if (!buf) {
-               printf("malloc(%d) failed: %m\n", mfd_def_size * 8);
+               printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
                abort();
        }
 
@@ -535,7 +535,7 @@ static void mfd_fail_grow_write(int fd)
 
        buf = malloc(mfd_def_size * 8);
        if (!buf) {
-               printf("malloc(%d) failed: %m\n", mfd_def_size * 8);
+               printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
                abort();
        }
 
index 23db11c94b59abbcc881e3c80dcf93b50d9e3abe..86636d207adf72c12403355cae88d3bfe819fd80 100644 (file)
@@ -4,10 +4,10 @@ all:
 include ../lib.mk
 
 TEST_PROGS := mem-on-off-test.sh
-override RUN_TESTS := ./mem-on-off-test.sh -r 2 || echo "selftests: memory-hotplug [FAIL]"
+override RUN_TESTS := ./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]"
 override EMIT_TESTS := echo "$(RUN_TESTS)"
 
 run_full_test:
-       @/bin/bash ./mem-on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
+       @/bin/bash ./mem-on-off-test.sh && echo "memory-hotplug selftests: [PASS]" || echo "memory-hotplug selftests: [FAIL]"
 
 clean:
index 346d83ca8069ab2a9f5c4decf26d624d289be988..5af29d3a1b0ab6b26d68a11004c5a58cde3ae484 100644 (file)
@@ -1 +1,2 @@
 seccomp_bpf
+seccomp_benchmark
index cc986621f512d949634113960f8d1c6ae46354af..2c8ac8416299fb9221ddb24ad9929a539d6f9382 100644 (file)
@@ -18,3 +18,5 @@ threadtest
 valid-adjtimex
 adjtick
 set-tz
+freq-step
+rtctest_setdate
index 8daeb7d7032c2a0b79802bb9f8674a0d0d6aeb74..2df26bd0099ccb0033f0d48bbc4d9485bb42f5da 100644 (file)
@@ -19,6 +19,19 @@ extern void *vdso_sym(const char *version, const char *name);
 extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
 extern void vdso_init_from_auxv(void *auxv);
 
+/*
+ * ARM64's vDSO exports its gettimeofday() implementation with a different
+ * name and version from other architectures, so we need to handle it as
+ * a special case.
+ */
+#if defined(__aarch64__)
+const char *version = "LINUX_2.6.39";
+const char *name = "__kernel_gettimeofday";
+#else
+const char *version = "LINUX_2.6";
+const char *name = "__vdso_gettimeofday";
+#endif
+
 int main(int argc, char **argv)
 {
        unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
@@ -31,10 +44,10 @@ int main(int argc, char **argv)
 
        /* Find gettimeofday. */
        typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
-       gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
+       gtod_t gtod = (gtod_t)vdso_sym(version, name);
 
        if (!gtod) {
-               printf("Could not find __vdso_gettimeofday\n");
+               printf("Could not find %s\n", name);
                return 1;
        }
 
@@ -45,7 +58,7 @@ int main(int argc, char **argv)
                printf("The time is %lld.%06lld\n",
                       (long long)tv.tv_sec, (long long)tv.tv_usec);
        } else {
-               printf("__vdso_gettimeofday failed\n");
+               printf("%s failed\n", name);
        }
 
        return 0;
index 142c565bb3518d0311f0049f135f7de55d336301..1ca2ee4d15b9e64dd34352c3fabf1a8d3ede45d7 100644 (file)
@@ -8,3 +8,5 @@ on-fault-limit
 transhuge-stress
 userfaultfd
 mlock-intersect-test
+mlock-random-test
+virtual_address_range
index e49eca1915f8ca564bc0182f750cfcbf6cfbc267..7f45806bd8632401b25e567802906451e9806303 100644 (file)
@@ -18,6 +18,7 @@ TEST_GEN_FILES += transhuge-stress
 TEST_GEN_FILES += userfaultfd
 TEST_GEN_FILES += mlock-random-test
 TEST_GEN_FILES += virtual_address_range
+TEST_GEN_FILES += gup_benchmark
 
 TEST_PROGS := run_vmtests
 
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
new file mode 100644 (file)
index 0000000..36df551
--- /dev/null
@@ -0,0 +1,91 @@
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <linux/types.h>
+
+#define MB (1UL << 20)
+#define PAGE_SIZE sysconf(_SC_PAGESIZE)
+
+#define GUP_FAST_BENCHMARK     _IOWR('g', 1, struct gup_benchmark)
+
+struct gup_benchmark {
+       __u64 delta_usec;
+       __u64 addr;
+       __u64 size;
+       __u32 nr_pages_per_call;
+       __u32 flags;
+};
+
+int main(int argc, char **argv)
+{
+       struct gup_benchmark gup;
+       unsigned long size = 128 * MB;
+       int i, fd, opt, nr_pages = 1, thp = -1, repeats = 1, write = 0;
+       char *p;
+
+       while ((opt = getopt(argc, argv, "m:r:n:wtT")) != -1) {
+               switch (opt) {
+               case 'm':
+                       size = atoi(optarg) * MB;
+                       break;
+               case 'r':
+                       repeats = atoi(optarg);
+                       break;
+               case 'n':
+                       nr_pages = atoi(optarg);
+                       break;
+               case 't':
+                       thp = 1;
+                       break;
+               case 'T':
+                       thp = 0;
+                       break;
+               case 'w':
+                       write = 1;
+                       break;
+               default:
+                       return -1;
+               }
+       }
+
+       gup.nr_pages_per_call = nr_pages;
+       gup.flags = write;
+
+       fd = open("/sys/kernel/debug/gup_benchmark", O_RDWR);
+       if (fd == -1)
+               perror("open"), exit(1);
+
+       p = mmap(NULL, size, PROT_READ | PROT_WRITE,
+                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+       if (p == MAP_FAILED)
+               perror("mmap"), exit(1);
+       gup.addr = (unsigned long)p;
+
+       if (thp == 1)
+               madvise(p, size, MADV_HUGEPAGE);
+       else if (thp == 0)
+               madvise(p, size, MADV_NOHUGEPAGE);
+
+       for (; (unsigned long)p < gup.addr + size; p += PAGE_SIZE)
+               p[0] = 0;
+
+       for (i = 0; i < repeats; i++) {
+               gup.size = size;
+               if (ioctl(fd, GUP_FAST_BENCHMARK, &gup))
+                       perror("ioctl"), exit(1);
+
+               printf("Time: %lld us", gup.delta_usec);
+               if (gup.size != size)
+                       printf(", truncated (size: %lld)", gup.size);
+               printf("\n");
+       }
+
+       return 0;
+}
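For reference, once the kernel-side debugfs file opened above (/sys/kernel/debug/gup_benchmark) is available, an invocation along the lines of ./gup_benchmark -m 256 -r 10 -n 32 -t maps 256 MB, forces THP via madvise(), and issues ten GUP_FAST_BENCHMARK ioctls at 32 pages per get_user_pages_fast() call; -T disables THP instead, and -w is intended to request the write variant. The numbers here are only an illustrative choice, not values taken from the patch.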
diff --git a/tools/testing/selftests/x86/5lvl.c b/tools/testing/selftests/x86/5lvl.c
new file mode 100644 (file)
index 0000000..2eafdcd
--- /dev/null
@@ -0,0 +1,177 @@
+#include <stdio.h>
+#include <sys/mman.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define PAGE_SIZE      4096
+#define LOW_ADDR       ((void *) (1UL << 30))
+#define HIGH_ADDR      ((void *) (1UL << 50))
+
+struct testcase {
+       void *addr;
+       unsigned long size;
+       unsigned long flags;
+       const char *msg;
+       unsigned int low_addr_required:1;
+       unsigned int keep_mapped:1;
+};
+
+static struct testcase testcases[] = {
+       {
+               .addr = NULL,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(NULL)",
+               .low_addr_required = 1,
+       },
+       {
+               .addr = LOW_ADDR,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(LOW_ADDR)",
+               .low_addr_required = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(HIGH_ADDR)",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(HIGH_ADDR) again",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+               .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
+       },
+       {
+               .addr = (void*) -1,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(-1)",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void*) -1,
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(-1) again",
+       },
+       {
+               .addr = (void *)((1UL << 47) - PAGE_SIZE),
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap((1UL << 47), 2 * PAGE_SIZE)",
+               .low_addr_required = 1,
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void *)((1UL << 47) - PAGE_SIZE / 2),
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap((1UL << 47), 2 * PAGE_SIZE / 2)",
+               .low_addr_required = 1,
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void *)((1UL << 47) - PAGE_SIZE),
+               .size = 2 * PAGE_SIZE,
+               .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+               .msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE, MAP_FIXED)",
+       },
+       {
+               .addr = NULL,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(NULL, MAP_HUGETLB)",
+               .low_addr_required = 1,
+       },
+       {
+               .addr = LOW_ADDR,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
+               .low_addr_required = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = HIGH_ADDR,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+               .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
+       },
+       {
+               .addr = (void*) -1,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(-1, MAP_HUGETLB)",
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void*) -1,
+               .size = 2UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap(-1, MAP_HUGETLB) again",
+       },
+       {
+               .addr = (void *)((1UL << 47) - PAGE_SIZE),
+               .size = 4UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+               .msg = "mmap((1UL << 47), 4UL << 20, MAP_HUGETLB)",
+               .low_addr_required = 1,
+               .keep_mapped = 1,
+       },
+       {
+               .addr = (void *)((1UL << 47) - (2UL << 20)),
+               .size = 4UL << 20,
+               .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+               .msg = "mmap((1UL << 47) - (2UL << 20), 4UL << 20, MAP_FIXED | MAP_HUGETLB)",
+       },
+};
+
+int main(int argc, char **argv)
+{
+       int i;
+       void *p;
+
+       for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+               struct testcase *t = testcases + i;
+
+               p = mmap(t->addr, t->size, PROT_NONE, t->flags, -1, 0);
+
+               printf("%s: %p - ", t->msg, p);
+
+               if (p == MAP_FAILED) {
+                       printf("FAILED\n");
+                       continue;
+               }
+
+               if (t->low_addr_required && p >= (void *)(1UL << 47))
+                       printf("FAILED\n");
+               else
+                       printf("OK\n");
+               if (!t->keep_mapped)
+                       munmap(p, t->size);
+       }
+       return 0;
+}
index 7b1adeee4b0f1956cc78d2e722bf17e5f716454c..939a337128dbf3fb08277995de98437b10b037b8 100644 (file)
@@ -11,7 +11,7 @@ TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_sysc
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
                        test_FCMOV test_FCOMI test_FISTTP \
                        vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
 TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
index 3f0093911f03d5e474d245c4b4e37064643e917b..d1b61ab870f8d9f4d78412b48322f5f58bd5cd76 100644 (file)
 struct mpx_bd_entry {
        union {
                char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
-               void *contents[1];
+               void *contents[0];
        };
 } __attribute__((packed));
 
 struct mpx_bt_entry {
        union {
                char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
-               unsigned long contents[1];
+               unsigned long contents[0];
        };
 } __attribute__((packed));
 
index 3818f25391c24c34beeb20c2e9f937bdfc9b111a..b3cb7670e02661cd2ab66fd3da98b3940dd44c70 100644 (file)
@@ -30,6 +30,7 @@ static inline void sigsafe_printf(const char *format, ...)
        if (!dprint_in_signal) {
                vprintf(format, ap);
        } else {
+               int ret;
                int len = vsnprintf(dprint_in_signal_buffer,
                                    DPRINT_IN_SIGNAL_BUF_SIZE,
                                    format, ap);
@@ -39,7 +40,9 @@ static inline void sigsafe_printf(const char *format, ...)
                 */
                if (len > DPRINT_IN_SIGNAL_BUF_SIZE)
                        len = DPRINT_IN_SIGNAL_BUF_SIZE;
-               write(1, dprint_in_signal_buffer, len);
+               ret = write(1, dprint_in_signal_buffer, len);
+               if (ret < 0)
+                       abort();
        }
        va_end(ap);
 }
index 7a1cc0e56d2d6a5006548f9f4488ae003db24911..bc1b0735bb50ed02963e834c7dc38395f7c6d834 100644 (file)
@@ -250,7 +250,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
        unsigned long ip;
        char *fpregs;
        u32 *pkru_ptr;
-       u64 si_pkey;
+       u64 siginfo_pkey;
        u32 *si_pkey_ptr;
        int pkru_offset;
        fpregset_t fpregset;
@@ -292,9 +292,9 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
        si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
        dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
        dump_mem(si_pkey_ptr - 8, 24);
-       si_pkey = *si_pkey_ptr;
-       pkey_assert(si_pkey < NR_PKEYS);
-       last_si_pkey = si_pkey;
+       siginfo_pkey = *si_pkey_ptr;
+       pkey_assert(siginfo_pkey < NR_PKEYS);
+       last_si_pkey = siginfo_pkey;
 
        if ((si->si_code == SEGV_MAPERR) ||
            (si->si_code == SEGV_ACCERR) ||
@@ -306,7 +306,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
        dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
        /* need __rdpkru() version so we do not do shadow_pkru checking */
        dprintf1("signal pkru from  pkru: %08x\n", __rdpkru());
-       dprintf1("si_pkey from siginfo: %jx\n", si_pkey);
+       dprintf1("pkey from siginfo: %jx\n", siginfo_pkey);
        *(u64 *)pkru_ptr = 0x00000000;
        dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
        pkru_faults++;
diff --git a/tools/wmi/Makefile b/tools/wmi/Makefile
new file mode 100644 (file)
index 0000000..e664f11
--- /dev/null
@@ -0,0 +1,18 @@
+PREFIX ?= /usr
+SBINDIR ?= sbin
+INSTALL ?= install
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+CC = $(CROSS_COMPILE)gcc
+
+TARGET = dell-smbios-example
+
+all: $(TARGET)
+
+%: %.c
+       $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $<
+
+clean:
+       $(RM) $(TARGET)
+
+install: dell-smbios-example
+       $(INSTALL) -D -m 755 $(TARGET) $(DESTDIR)$(PREFIX)/$(SBINDIR)/$(TARGET)
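For reference, with the defaults above (PREFIX=/usr, SBINDIR=sbin) a typical build-and-stage sequence would be something like make && make install DESTDIR=/tmp/stage, which places dell-smbios-example under /tmp/stage/usr/sbin; the staging directory is only an example.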
diff --git a/tools/wmi/dell-smbios-example.c b/tools/wmi/dell-smbios-example.c
new file mode 100644 (file)
index 0000000..9d3bde0
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ *  Sample application for SMBIOS communication over WMI interface
+ *  Performs the following:
+ *  - Simple cmd_class/cmd_select lookup for TPM information
+ *  - Simple query of known tokens and their values
+ *  - Simple activation of a token
+ *
+ *  Copyright (C) 2017 Dell, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+/* if uapi header isn't installed, this might not yet exist */
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+#include <linux/wmi.h>
+
+/* It would be better to discover these using udev, but for a simple
+ * application they're hardcoded
+ */
+static const char *ioctl_devfs = "/dev/wmi/dell-smbios";
+static const char *token_sysfs =
+                       "/sys/bus/platform/devices/dell-smbios.0/tokens";
+
+static void show_buffer(struct dell_wmi_smbios_buffer *buffer)
+{
+       printf("Call: %x/%x [%x,%x,%x,%x]\nResults: [%8x,%8x,%8x,%8x]\n",
+       buffer->std.cmd_class, buffer->std.cmd_select,
+       buffer->std.input[0], buffer->std.input[1],
+       buffer->std.input[2], buffer->std.input[3],
+       buffer->std.output[0], buffer->std.output[1],
+       buffer->std.output[2], buffer->std.output[3]);
+}
+
+static int run_wmi_smbios_cmd(struct dell_wmi_smbios_buffer *buffer)
+{
+       int fd;
+       int ret;
+
+       fd = open(ioctl_devfs, O_NONBLOCK);
+       ret = ioctl(fd, DELL_WMI_SMBIOS_CMD, buffer);
+       close(fd);
+       return ret;
+}
+
+static int find_token(__u16 token, __u16 *location, __u16 *value)
+{
+       char location_sysfs[60];
+       char value_sysfs[57];
+       char buf[4096];
+       FILE *f;
+       int ret;
+
+       ret = sprintf(value_sysfs, "%s/%04x_value", token_sysfs, token);
+       if (ret < 0) {
+               printf("sprintf value failed\n");
+               return 2;
+       }
+       f = fopen(value_sysfs, "rb");
+       if (!f) {
+               printf("failed to open %s\n", value_sysfs);
+               return 2;
+       }
+       fread(buf, 1, 4096, f);
+       fclose(f);
+       *value = (__u16) strtol(buf, NULL, 16);
+
+       ret = sprintf(location_sysfs, "%s/%04x_location", token_sysfs, token);
+       if (ret < 0) {
+               printf("sprintf location failed\n");
+               return 1;
+       }
+       f = fopen(location_sysfs, "rb");
+       if (!f) {
+               printf("failed to open %s\n", location_sysfs);
+               return 2;
+       }
+       fread(buf, 1, 4096, f);
+       fclose(f);
+       *location = (__u16) strtol(buf, NULL, 16);
+
+       if (*location)
+               return 0;
+       return 2;
+}
+
+static int token_is_active(__u16 *location, __u16 *cmpvalue,
+                          struct dell_wmi_smbios_buffer *buffer)
+{
+       int ret;
+
+       buffer->std.cmd_class = CLASS_TOKEN_READ;
+       buffer->std.cmd_select = SELECT_TOKEN_STD;
+       buffer->std.input[0] = *location;
+       ret = run_wmi_smbios_cmd(buffer);
+       if (ret != 0 || buffer->std.output[0] != 0)
+               return ret;
+       ret = (buffer->std.output[1] == *cmpvalue);
+       return ret;
+}
+
+static int query_token(__u16 token, struct dell_wmi_smbios_buffer *buffer)
+{
+       __u16 location;
+       __u16 value;
+       int ret;
+
+       ret = find_token(token, &location, &value);
+       if (ret != 0) {
+               printf("unable to find token %04x\n", token);
+               return 1;
+       }
+       return token_is_active(&location, &value, buffer);
+}
+
+static int activate_token(struct dell_wmi_smbios_buffer *buffer,
+                  __u16 token)
+{
+       __u16 location;
+       __u16 value;
+       int ret;
+
+       ret = find_token(token, &location, &value);
+       if (ret != 0) {
+               printf("unable to find token %04x\n", token);
+               return 1;
+       }
+       buffer->std.cmd_class = CLASS_TOKEN_WRITE;
+       buffer->std.cmd_select = SELECT_TOKEN_STD;
+       buffer->std.input[0] = location;
+       buffer->std.input[1] = 1;
+       ret = run_wmi_smbios_cmd(buffer);
+       return ret;
+}
+
+static int query_buffer_size(__u64 *buffer_size)
+{
+       FILE *f;
+
+       f = fopen(ioctl_devfs, "rb");
+       if (!f)
+               return -EINVAL;
+       fread(buffer_size, sizeof(__u64), 1, f);
+       fclose(f);
+       return EXIT_SUCCESS;
+}
+
+int main(void)
+{
+       struct dell_wmi_smbios_buffer *buffer;
+       int ret;
+       __u64 value = 0;
+
+       ret = query_buffer_size(&value);
+       if (ret == EXIT_FAILURE || !value) {
+               printf("Unable to read buffer size\n");
+               goto out;
+       }
+       printf("Detected required buffer size %lld\n", value);
+
+       buffer = malloc(value);
+       if (buffer == NULL) {
+               printf("failed to alloc memory for ioctl\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+       buffer->length = value;
+
+       /* simple SMBIOS call for looking up TPM info */
+       buffer->std.cmd_class = CLASS_FLASH_INTERFACE;
+       buffer->std.cmd_select = SELECT_FLASH_INTERFACE;
+       buffer->std.input[0] = 2;
+       ret = run_wmi_smbios_cmd(buffer);
+       if (ret) {
+               printf("smbios ioctl failed: %d\n", ret);
+               ret = EXIT_FAILURE;
+               goto out;
+       }
+       show_buffer(buffer);
+
+       /* query some tokens */
+       ret = query_token(CAPSULE_EN_TOKEN, buffer);
+       printf("UEFI Capsule enabled token is: %d\n", ret);
+       ret = query_token(CAPSULE_DIS_TOKEN, buffer);
+       printf("UEFI Capsule disabled token is: %d\n", ret);
+
+       /* activate UEFI capsule token if disabled */
+       if (ret) {
+               printf("Enabling UEFI capsule token");
+               if (activate_token(buffer, CAPSULE_EN_TOKEN)) {
+                       printf("activate failed\n");
+                       ret = -1;
+                       goto out;
+               }
+       }
+       ret = EXIT_SUCCESS;
+out:
+       free(buffer);
+       return ret;
+}
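One thing worth noting in the sample: run_wmi_smbios_cmd() never checks whether open() succeeded, so a missing /dev/wmi/dell-smbios node simply surfaces as a failed ioctl() on an invalid descriptor. A more defensive variant, sketched only from identifiers the example already defines (the _checked suffix is illustrative):

    static int run_wmi_smbios_cmd_checked(struct dell_wmi_smbios_buffer *buffer)
    {
            int fd;
            int ret;

            fd = open(ioctl_devfs, O_NONBLOCK);
            if (fd < 0) {
                    ret = -errno;
                    perror("open dell-smbios ioctl device");
                    return ret;
            }
            ret = ioctl(fd, DELL_WMI_SMBIOS_CMD, buffer);
            if (ret < 0)
                    perror("DELL_WMI_SMBIOS_CMD");
            close(fd);
            return ret;
    }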
index 4db54ff08d9e92b907117ee34d1d302e18fd5dbb..4151250ce8da94ac3c83853e7d6e980515ad23a5 100644 (file)
@@ -817,9 +817,6 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-       struct irq_desc *desc;
-       struct irq_data *data;
-       int phys_irq;
        int ret;
 
        if (timer->enabled)
@@ -837,26 +834,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
                return -EINVAL;
        }
 
-       /*
-        * Find the physical IRQ number corresponding to the host_vtimer_irq
-        */
-       desc = irq_to_desc(host_vtimer_irq);
-       if (!desc) {
-               kvm_err("%s: no interrupt descriptor\n", __func__);
-               return -EINVAL;
-       }
-
-       data = irq_desc_get_irq_data(desc);
-       while (data->parent_data)
-               data = data->parent_data;
-
-       phys_irq = data->hwirq;
-
-       /*
-        * Tell the VGIC that the virtual interrupt is tied to a
-        * physical interrupt. We do that once per VCPU.
-        */
-       ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
+       ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq);
        if (ret)
                return ret;
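With this change the timer code hands the Linux interrupt number (host_vtimer_irq) straight to kvm_vgic_map_phys_irq() together with the virtual interrupt ID; the irq_to_desc()/parent-data walk that used to recover the hardware IRQ here has not disappeared, it moves into the vgic core as the new kvm_vgic_map_irq() helper shown in a later hunk of this diff.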
 
index 772bf74ac2e9ae8380e0ba2b87b385883eca6d4c..a67c106d73f5c33a039d046c9f46454bbcfc801f 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/mman.h>
 #include <linux/sched.h>
 #include <linux/kvm.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
 #include <trace/events/kvm.h>
 #include <kvm/arm_pmu.h>
 
@@ -175,6 +177,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        int i;
 
+       kvm_vgic_destroy(kvm);
+
        free_percpu(kvm->arch.last_vcpu_ran);
        kvm->arch.last_vcpu_ran = NULL;
 
@@ -184,8 +188,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                        kvm->vcpus[i] = NULL;
                }
        }
-
-       kvm_vgic_destroy(kvm);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -313,11 +315,13 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
        kvm_timer_schedule(vcpu);
+       kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
        kvm_timer_unschedule(vcpu);
+       kvm_vgic_v4_disable_doorbell(vcpu);
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -611,7 +615,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int ret;
-       sigset_t sigsaved;
 
        if (unlikely(!kvm_vcpu_initialized(vcpu)))
                return -ENOEXEC;
@@ -629,8 +632,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        if (run->immediate_exit)
                return -EINTR;
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       kvm_sigset_activate(vcpu);
 
        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -765,8 +767,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                kvm_pmu_update_run(vcpu);
        }
 
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       kvm_sigset_deactivate(vcpu);
+
        return ret;
 }
 
@@ -1450,6 +1452,46 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
        return NULL;
 }
 
+bool kvm_arch_has_irq_bypass(void)
+{
+       return true;
+}
+
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+                                     struct irq_bypass_producer *prod)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
+                                         &irqfd->irq_entry);
+}
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+                                     struct irq_bypass_producer *prod)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
+                                    &irqfd->irq_entry);
+}
+
+void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       kvm_arm_halt_guest(irqfd->kvm);
+}
+
+void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       kvm_arm_resume_guest(irqfd->kvm);
+}
+
 /**
  * Initialize Hyp-mode and memory mappings on all CPUs.
  */
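Two separate threads of this series meet in this file. First, the open-coded sigprocmask() save/restore around the run loop is replaced by the generic kvm_sigset_activate()/kvm_sigset_deactivate() helpers defined in the common-KVM hunk at the end of this diff. Second, the arch gains GICv4 plumbing: kvm_arch_vcpu_blocking()/unblocking() now toggle the per-vcpu doorbell, kvm_vgic_destroy() runs before the vcpus are freed, and the new irq-bypass callbacks let VFIO establish or tear down direct VLPI forwarding via kvm_vgic_v4_set_forwarding()/kvm_vgic_v4_unset_forwarding(), halting and resuming the guest around producer changes.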
index 91728faa13fdc8650b3bbdd54b00ac42be3dc01f..f5c3d6d7019ea63a7d2376c68f6c392cd2036aaf 100644 (file)
@@ -258,7 +258,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
                        cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
                }
        } else {
-               if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+               if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+                   cpu_if->its_vpe.its_vm)
                        write_gicreg(0, ICH_HCR_EL2);
 
                cpu_if->vgic_elrsr = 0xffff;
@@ -337,9 +338,11 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
                /*
                 * If we need to trap system registers, we must write
                 * ICH_HCR_EL2 anyway, even if no interrupts are being
-                * injected,
+                * injected. Same thing if GICv4 is used, as VLPI
+                * delivery is gated by ICH_HCR_EL2.En.
                 */
-               if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+               if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+                   cpu_if->its_vpe.its_vm)
                        write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
        }
 
index 5801261f3adddeaab819f4ffd23ecd5839d03c8a..62310122ee78eff828eb7f4ae6f8cfdb4ed18f0c 100644 (file)
@@ -285,6 +285,10 @@ int vgic_init(struct kvm *kvm)
        if (ret)
                goto out;
 
+       ret = vgic_v4_init(kvm);
+       if (ret)
+               goto out;
+
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_enable(vcpu);
 
@@ -320,6 +324,9 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 
        kfree(dist->spis);
        dist->nr_spis = 0;
+
+       if (vgic_supports_direct_msis(kvm))
+               vgic_v4_teardown(kvm);
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
index d2a99ab0ade7a2a83a36466d3e76bff88b1e48a5..1f761a9991e7d6ee8f3eddaa67aabd5152387eed 100644 (file)
@@ -38,7 +38,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its);
 static int vgic_its_restore_tables_v0(struct vgic_its *its);
 static int vgic_its_commit_v0(struct vgic_its *its);
 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
-                            struct kvm_vcpu *filter_vcpu);
+                            struct kvm_vcpu *filter_vcpu, bool needs_inv);
 
 /*
  * Creates a new (reference to a) struct vgic_irq for a given LPI.
@@ -106,7 +106,7 @@ out_unlock:
         * However we only have those structs for mapped IRQs, so we read in
         * the respective config data from memory here upon mapping the LPI.
         */
-       ret = update_lpi_config(kvm, irq, NULL);
+       ret = update_lpi_config(kvm, irq, NULL, false);
        if (ret)
                return ERR_PTR(ret);
 
@@ -273,7 +273,7 @@ static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
  * VCPU. Unconditionally applies if filter_vcpu is NULL.
  */
 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
-                            struct kvm_vcpu *filter_vcpu)
+                            struct kvm_vcpu *filter_vcpu, bool needs_inv)
 {
        u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
        u8 prop;
@@ -292,11 +292,17 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                irq->priority = LPI_PROP_PRIORITY(prop);
                irq->enabled = LPI_PROP_ENABLE_BIT(prop);
 
-               vgic_queue_irq_unlock(kvm, irq, flags);
-       } else {
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               if (!irq->hw) {
+                       vgic_queue_irq_unlock(kvm, irq, flags);
+                       return 0;
+               }
        }
 
+       spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+       if (irq->hw)
+               return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+
        return 0;
 }
 
@@ -336,6 +342,29 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
        return i;
 }
 
+static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
+{
+       int ret = 0;
+
+       spin_lock(&irq->irq_lock);
+       irq->target_vcpu = vcpu;
+       spin_unlock(&irq->irq_lock);
+
+       if (irq->hw) {
+               struct its_vlpi_map map;
+
+               ret = its_get_vlpi(irq->host_irq, &map);
+               if (ret)
+                       return ret;
+
+               map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+               ret = its_map_vlpi(irq->host_irq, &map);
+       }
+
+       return ret;
+}
+
 /*
  * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
  * is targeting) to the VGIC's view, which deals with target VCPUs.
@@ -350,10 +379,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
                return;
 
        vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
-
-       spin_lock(&ite->irq->irq_lock);
-       ite->irq->target_vcpu = vcpu;
-       spin_unlock(&ite->irq->irq_lock);
+       update_affinity(ite->irq, vcpu);
 }
 
 /*
@@ -505,19 +531,11 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
        return 0;
 }
 
-/*
- * Find the target VCPU and the LPI number for a given devid/eventid pair
- * and make this IRQ pending, possibly injecting it.
- * Must be called with the its_lock mutex held.
- * Returns 0 on success, a positive error value for any ITS mapping
- * related errors and negative error values for generic errors.
- */
-static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
-                               u32 devid, u32 eventid)
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+                        u32 devid, u32 eventid, struct vgic_irq **irq)
 {
        struct kvm_vcpu *vcpu;
        struct its_ite *ite;
-       unsigned long flags;
 
        if (!its->enabled)
                return -EBUSY;
@@ -533,26 +551,65 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
        if (!vcpu->arch.vgic_cpu.lpis_enabled)
                return -EBUSY;
 
-       spin_lock_irqsave(&ite->irq->irq_lock, flags);
-       ite->irq->pending_latch = true;
-       vgic_queue_irq_unlock(kvm, ite->irq, flags);
-
+       *irq = ite->irq;
        return 0;
 }
 
-static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
 {
+       u64 address;
+       struct kvm_io_device *kvm_io_dev;
        struct vgic_io_device *iodev;
 
-       if (dev->ops != &kvm_io_gic_ops)
-               return NULL;
+       if (!vgic_has_its(kvm))
+               return ERR_PTR(-ENODEV);
 
-       iodev = container_of(dev, struct vgic_io_device, dev);
+       if (!(msi->flags & KVM_MSI_VALID_DEVID))
+               return ERR_PTR(-EINVAL);
 
+       address = (u64)msi->address_hi << 32 | msi->address_lo;
+
+       kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
+       if (!kvm_io_dev)
+               return ERR_PTR(-EINVAL);
+
+       if (kvm_io_dev->ops != &kvm_io_gic_ops)
+               return ERR_PTR(-EINVAL);
+
+       iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
        if (iodev->iodev_type != IODEV_ITS)
-               return NULL;
+               return ERR_PTR(-EINVAL);
+
+       return iodev->its;
+}
+
+/*
+ * Find the target VCPU and the LPI number for a given devid/eventid pair
+ * and make this IRQ pending, possibly injecting it.
+ * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
+ */
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+                               u32 devid, u32 eventid)
+{
+       struct vgic_irq *irq = NULL;
+       unsigned long flags;
+       int err;
+
+       err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
+       if (err)
+               return err;
+
+       if (irq->hw)
+               return irq_set_irqchip_state(irq->host_irq,
+                                            IRQCHIP_STATE_PENDING, true);
+
+       spin_lock_irqsave(&irq->irq_lock, flags);
+       irq->pending_latch = true;
+       vgic_queue_irq_unlock(kvm, irq, flags);
 
-       return iodev;
+       return 0;
 }
 
 /*
@@ -563,30 +620,16 @@ static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
  */
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
-       u64 address;
-       struct kvm_io_device *kvm_io_dev;
-       struct vgic_io_device *iodev;
+       struct vgic_its *its;
        int ret;
 
-       if (!vgic_has_its(kvm))
-               return -ENODEV;
-
-       if (!(msi->flags & KVM_MSI_VALID_DEVID))
-               return -EINVAL;
+       its = vgic_msi_to_its(kvm, msi);
+       if (IS_ERR(its))
+               return PTR_ERR(its);
 
-       address = (u64)msi->address_hi << 32 | msi->address_lo;
-
-       kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
-       if (!kvm_io_dev)
-               return -EINVAL;
-
-       iodev = vgic_get_its_iodev(kvm_io_dev);
-       if (!iodev)
-               return -EINVAL;
-
-       mutex_lock(&iodev->its->its_lock);
-       ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
-       mutex_unlock(&iodev->its->its_lock);
+       mutex_lock(&its->its_lock);
+       ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
+       mutex_unlock(&its->its_lock);
 
        if (ret < 0)
                return ret;
@@ -608,8 +651,12 @@ static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
        list_del(&ite->ite_list);
 
        /* This put matches the get in vgic_add_lpi. */
-       if (ite->irq)
+       if (ite->irq) {
+               if (ite->irq->hw)
+                       WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
                vgic_put_irq(kvm, ite->irq);
+       }
 
        kfree(ite);
 }
@@ -683,11 +730,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
        ite->collection = collection;
        vcpu = kvm_get_vcpu(kvm, collection->target_addr);
 
-       spin_lock(&ite->irq->irq_lock);
-       ite->irq->target_vcpu = vcpu;
-       spin_unlock(&ite->irq->irq_lock);
-
-       return 0;
+       return update_affinity(ite->irq, vcpu);
 }
 
 /*
@@ -1054,6 +1097,10 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
 
        ite->irq->pending_latch = false;
 
+       if (ite->irq->hw)
+               return irq_set_irqchip_state(ite->irq->host_irq,
+                                            IRQCHIP_STATE_PENDING, false);
+
        return 0;
 }
 
@@ -1073,7 +1120,7 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
        if (!ite)
                return E_ITS_INV_UNMAPPED_INTERRUPT;
 
-       return update_lpi_config(kvm, ite->irq, NULL);
+       return update_lpi_config(kvm, ite->irq, NULL, true);
 }
 
 /*
@@ -1108,12 +1155,15 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
                irq = vgic_get_irq(kvm, NULL, intids[i]);
                if (!irq)
                        continue;
-               update_lpi_config(kvm, irq, vcpu);
+               update_lpi_config(kvm, irq, vcpu, false);
                vgic_put_irq(kvm, irq);
        }
 
        kfree(intids);
 
+       if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
+               its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+
        return 0;
 }
 
@@ -1128,11 +1178,12 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
                                      u64 *its_cmd)
 {
-       struct vgic_dist *dist = &kvm->arch.vgic;
        u32 target1_addr = its_cmd_get_target_addr(its_cmd);
        u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
        struct kvm_vcpu *vcpu1, *vcpu2;
        struct vgic_irq *irq;
+       u32 *intids;
+       int irq_count, i;
 
        if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
            target2_addr >= atomic_read(&kvm->online_vcpus))
@@ -1144,19 +1195,19 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
        vcpu1 = kvm_get_vcpu(kvm, target1_addr);
        vcpu2 = kvm_get_vcpu(kvm, target2_addr);
 
-       spin_lock(&dist->lpi_list_lock);
+       irq_count = vgic_copy_lpi_list(vcpu1, &intids);
+       if (irq_count < 0)
+               return irq_count;
 
-       list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
-               spin_lock(&irq->irq_lock);
+       for (i = 0; i < irq_count; i++) {
+               irq = vgic_get_irq(kvm, NULL, intids[i]);
 
-               if (irq->target_vcpu == vcpu1)
-                       irq->target_vcpu = vcpu2;
+               update_affinity(irq, vcpu2);
 
-               spin_unlock(&irq->irq_lock);
+               vgic_put_irq(kvm, irq);
        }
 
-       spin_unlock(&dist->lpi_list_lock);
-
+       kfree(intids);
        return 0;
 }
 
@@ -1634,6 +1685,14 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
        if (!its)
                return -ENOMEM;
 
+       if (vgic_initialized(dev->kvm)) {
+               int ret = vgic_v4_init(dev->kvm);
+               if (ret < 0) {
+                       kfree(its);
+                       return ret;
+               }
+       }
+
        mutex_init(&its->its_lock);
        mutex_init(&its->cmd_lock);
 
@@ -1946,6 +2005,15 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
        list_for_each_entry(ite, &device->itt_head, ite_list) {
                gpa_t gpa = base + ite->event_id * ite_esz;
 
+               /*
+                * If an LPI carries the HW bit, this means that this
+                * interrupt is controlled by GICv4, and we do not
+                * have direct access to that state. Let's simply fail
+                * the save operation...
+                */
+               if (ite->irq->hw)
+                       return -EACCES;
+
                ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
                if (ret)
                        return ret;
index 83786108829e38feb57267648c88acb04173f8ca..671fe81f8e1de991e1636a9901012b0a203618a4 100644 (file)
@@ -54,6 +54,11 @@ bool vgic_has_its(struct kvm *kvm)
        return dist->has_its;
 }
 
+bool vgic_supports_direct_msis(struct kvm *kvm)
+{
+       return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+}
+
 static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
 {
index 863351c090d8f2129ec8224507fd75bff15d38d2..2f05f732d3fd467e600e8b223b36cd80ac91303a 100644 (file)
@@ -24,6 +24,7 @@
 static bool group0_trap;
 static bool group1_trap;
 static bool common_trap;
+static bool gicv4_enable;
 
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
@@ -461,6 +462,12 @@ static int __init early_common_trap_cfg(char *buf)
 }
 early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
 
+static int __init early_gicv4_enable(char *buf)
+{
+       return strtobool(buf, &gicv4_enable);
+}
+early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
+
 /**
  * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
  * @node:      pointer to the DT node
@@ -480,6 +487,13 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
        kvm_vgic_global_state.can_emulate_gicv2 = false;
        kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
 
+       /* GICv4 support? */
+       if (info->has_v4) {
+               kvm_vgic_global_state.has_gicv4 = gicv4_enable;
+               kvm_info("GICv4 support %sabled\n",
+                        gicv4_enable ? "en" : "dis");
+       }
+
        if (!info->vcpu.start) {
                kvm_info("GICv3: no GICV resource entry\n");
                kvm_vgic_global_state.vcpu_base = 0;
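Note that GICv4 support remains opt-in: gicv4_enable is a plain static bool, so even on hardware whose GIC advertises v4 (info->has_v4), direct VLPI injection stays off unless the kernel is booted with kvm-arm.vgic_v4_enable=1, and the probe message reports which way it went.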
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
new file mode 100644 (file)
index 0000000..53c324a
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kvm_host.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include "vgic.h"
+
+/*
+ * How KVM uses GICv4 (insert rude comments here):
+ *
+ * The vgic-v4 layer acts as a bridge between several entities:
+ * - The GICv4 ITS representation offered by the ITS driver
+ * - VFIO, which is in charge of the PCI endpoint
+ * - The virtual ITS, which is the only thing the guest sees
+ *
+ * The configuration of VLPIs is triggered by a callback from VFIO,
+ * instructing KVM that a PCI device has been configured to deliver
+ * MSIs to a vITS.
+ *
+ * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
+ * and this is used to find the corresponding vITS data structures
+ * (ITS instance, device, event and irq) using a process that is
+ * extremely similar to the injection of an MSI.
+ *
+ * At this stage, we can link the guest's view of an LPI (uniquely
+ * identified by the routing entry) and the host irq, using the GICv4
+ * driver mapping operation. Should the mapping succeed, we've then
+ * successfully upgraded the guest's LPI to a VLPI. We can then start
+ * with updating GICv4's view of the property table and generating an
+ * INValidation in order to kickstart the delivery of this VLPI to the
+ * guest directly, without software intervention. Well, almost.
+ *
+ * When the PCI endpoint is deconfigured, this operation is reversed
+ * with VFIO calling kvm_vgic_v4_unset_forwarding().
+ *
+ * Once the VLPI has been mapped, it needs to follow any change the
+ * guest performs on its LPI through the vITS. For that, a number of
+ * command handlers have hooks to communicate these changes to the HW:
+ * - Any invalidation triggers a call to its_prop_update_vlpi()
+ * - The INT command results in an irq_set_irqchip_state(), which
+ *   generates an INT on the corresponding VLPI.
+ * - The CLEAR command results in an irq_set_irqchip_state(), which
+ *   generates a CLEAR on the corresponding VLPI.
+ * - DISCARD translates into an unmap, similar to a call to
+ *   kvm_vgic_v4_unset_forwarding().
+ * - MOVI is translated by an update of the existing mapping, changing
+ *   the target vcpu, resulting in a VMOVI being generated.
+ * - MOVALL is translated by a string of mapping updates (similar to
+ *   the handling of MOVI). MOVALL is horrible.
+ *
+ * Note that a DISCARD/MAPTI sequence emitted from the guest without
+ * reprogramming the PCI endpoint after MAPTI does not result in a
+ * VLPI being mapped, as there is no callback from VFIO (the guest
+ * will get the interrupt via the normal SW injection). Fixing this is
+ * not trivial, and requires some horrible messing with the VFIO
+ * internals. Not fun. Don't do that.
+ *
+ * Then there is the scheduling. Each time a vcpu is about to run on a
+ * physical CPU, KVM must tell the corresponding redistributor about
+ * it. And if we've migrated our vcpu from one CPU to another, we must
+ * tell the ITS (so that the messages reach the right redistributor).
+ * This is done in two steps: first issue an irq_set_affinity() on the
+ * irq corresponding to the vcpu, then call its_schedule_vpe(). You
+ * must be in a non-preemptible context. On exit, another call to
+ * its_schedule_vpe() tells the redistributor that we're done with the
+ * vcpu.
+ *
+ * Finally, the doorbell handling: Each vcpu is allocated an interrupt
+ * which will fire each time a VLPI is made pending whilst the vcpu is
+ * not running. Each time the vcpu gets blocked, the doorbell
+ * interrupt gets enabled. When the vcpu is unblocked (for whatever
+ * reason), the doorbell interrupt is disabled.
+ */
+
+#define DB_IRQ_FLAGS   (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
+
+static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
+{
+       struct kvm_vcpu *vcpu = info;
+
+       vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+       kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+       kvm_vcpu_kick(vcpu);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * vgic_v4_init - Initialize the GICv4 data structures
+ * @kvm:       Pointer to the VM being initialized
+ *
+ * We may be called each time a vITS is created, or when the
+ * vgic is initialized. This relies on kvm->lock to be
+ * held. In both cases, the number of vcpus should now be
+ * fixed.
+ */
+int vgic_v4_init(struct kvm *kvm)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct kvm_vcpu *vcpu;
+       int i, nr_vcpus, ret;
+
+       if (!vgic_supports_direct_msis(kvm))
+               return 0; /* Nothing to see here... move along. */
+
+       if (dist->its_vm.vpes)
+               return 0;
+
+       nr_vcpus = atomic_read(&kvm->online_vcpus);
+
+       dist->its_vm.vpes = kzalloc(sizeof(*dist->its_vm.vpes) * nr_vcpus,
+                                   GFP_KERNEL);
+       if (!dist->its_vm.vpes)
+               return -ENOMEM;
+
+       dist->its_vm.nr_vpes = nr_vcpus;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+       ret = its_alloc_vcpu_irqs(&dist->its_vm);
+       if (ret < 0) {
+               kvm_err("VPE IRQ allocation failure\n");
+               kfree(dist->its_vm.vpes);
+               dist->its_vm.nr_vpes = 0;
+               dist->its_vm.vpes = NULL;
+               return ret;
+       }
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               int irq = dist->its_vm.vpes[i]->irq;
+
+               /*
+                * Don't automatically enable the doorbell, as we're
+                * flipping it back and forth when the vcpu gets
+                * blocked. Also disable the lazy disabling, as the
+                * doorbell could kick us out of the guest too
+                * early...
+                */
+               irq_set_status_flags(irq, DB_IRQ_FLAGS);
+               ret = request_irq(irq, vgic_v4_doorbell_handler,
+                                 0, "vcpu", vcpu);
+               if (ret) {
+                       kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+                       /*
+                        * Trick: adjust the number of vpes so we know
+                        * how many to nuke on teardown...
+                        */
+                       dist->its_vm.nr_vpes = i;
+                       break;
+               }
+       }
+
+       if (ret)
+               vgic_v4_teardown(kvm);
+
+       return ret;
+}
+
+/**
+ * vgic_v4_teardown - Free the GICv4 data structures
+ * @kvm:       Pointer to the VM being destroyed
+ *
+ * Relies on kvm->lock to be held.
+ */
+void vgic_v4_teardown(struct kvm *kvm)
+{
+       struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
+       int i;
+
+       if (!its_vm->vpes)
+               return;
+
+       for (i = 0; i < its_vm->nr_vpes; i++) {
+               struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
+               int irq = its_vm->vpes[i]->irq;
+
+               irq_clear_status_flags(irq, DB_IRQ_FLAGS);
+               free_irq(irq, vcpu);
+       }
+
+       its_free_vcpu_irqs(its_vm);
+       kfree(its_vm->vpes);
+       its_vm->nr_vpes = 0;
+       its_vm->vpes = NULL;
+}
+
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+       if (!vgic_supports_direct_msis(vcpu->kvm))
+               return 0;
+
+       return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
+}
+
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+       int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+       int err;
+
+       if (!vgic_supports_direct_msis(vcpu->kvm))
+               return 0;
+
+       /*
+        * Before making the VPE resident, make sure the redistributor
+        * corresponding to our current CPU expects us here. See the
+        * doc in drivers/irqchip/irq-gic-v4.c to understand how this
+        * turns into a VMOVP command at the ITS level.
+        */
+       err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
+       if (err)
+               return err;
+
+       err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
+       if (err)
+               return err;
+
+       /*
+        * Now that the VPE is resident, let's get rid of a potential
+        * doorbell interrupt that would still be pending.
+        */
+       err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
+
+       return err;
+}
+
+static struct vgic_its *vgic_get_its(struct kvm *kvm,
+                                    struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+       struct kvm_msi msi  = (struct kvm_msi) {
+               .address_lo     = irq_entry->msi.address_lo,
+               .address_hi     = irq_entry->msi.address_hi,
+               .data           = irq_entry->msi.data,
+               .flags          = irq_entry->msi.flags,
+               .devid          = irq_entry->msi.devid,
+       };
+
+       return vgic_msi_to_its(kvm, &msi);
+}
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
+                              struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+       struct vgic_its *its;
+       struct vgic_irq *irq;
+       struct its_vlpi_map map;
+       int ret;
+
+       if (!vgic_supports_direct_msis(kvm))
+               return 0;
+
+       /*
+        * Get the ITS, and escape early on error (not a valid
+        * doorbell for any of our vITSs).
+        */
+       its = vgic_get_its(kvm, irq_entry);
+       if (IS_ERR(its))
+               return 0;
+
+       mutex_lock(&its->its_lock);
+
+       /* Perform the actual DevID/EventID -> LPI translation. */
+       ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+                                  irq_entry->msi.data, &irq);
+       if (ret)
+               goto out;
+
+       /*
+        * Emit the mapping request. If it fails, the ITS probably
+        * isn't v4 compatible, so let's silently bail out. Holding
+        * the ITS lock should ensure that nothing can modify the
+        * target vcpu.
+        */
+       map = (struct its_vlpi_map) {
+               .vm             = &kvm->arch.vgic.its_vm,
+               .vpe            = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
+               .vintid         = irq->intid,
+               .properties     = ((irq->priority & 0xfc) |
+                                  (irq->enabled ? LPI_PROP_ENABLED : 0) |
+                                  LPI_PROP_GROUP1),
+               .db_enabled     = true,
+       };
+
+       ret = its_map_vlpi(virq, &map);
+       if (ret)
+               goto out;
+
+       irq->hw         = true;
+       irq->host_irq   = virq;
+
+out:
+       mutex_unlock(&its->its_lock);
+       return ret;
+}
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
+                                struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+       struct vgic_its *its;
+       struct vgic_irq *irq;
+       int ret;
+
+       if (!vgic_supports_direct_msis(kvm))
+               return 0;
+
+       /*
+        * Get the ITS, and escape early on error (not a valid
+        * doorbell for any of our vITSs).
+        */
+       its = vgic_get_its(kvm, irq_entry);
+       if (IS_ERR(its))
+               return 0;
+
+       mutex_lock(&its->its_lock);
+
+       ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+                                  irq_entry->msi.data, &irq);
+       if (ret)
+               goto out;
+
+       WARN_ON(!(irq->hw && irq->host_irq == virq));
+       irq->hw = false;
+       ret = its_unmap_vlpi(virq);
+
+out:
+       mutex_unlock(&its->its_lock);
+       return ret;
+}
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
+{
+       if (vgic_supports_direct_msis(vcpu->kvm)) {
+               int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+               if (irq)
+                       enable_irq(irq);
+       }
+}
+
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
+{
+       if (vgic_supports_direct_msis(vcpu->kvm)) {
+               int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+               if (irq)
+                       disable_irq(irq);
+       }
+}
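The long comment at the top of this new file describes the per-vcpu residency dance; condensed into one illustrative pair of helpers (the example_* wrappers are not part of the patch, they merely restate what vgic_v4_flush_hwstate()/vgic_v4_sync_hwstate() above do on guest entry and exit, both in non-preemptible context):

    /* Illustrative only: make the VPE resident before running the vcpu... */
    static int example_vpe_enter(struct kvm_vcpu *vcpu)
    {
            struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
            int err;

            /* Tell the ITS which redistributor we are about to run on
             * (turns into a VMOVP if the vcpu migrated between CPUs). */
            err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
            if (err)
                    return err;

            /* Make the VPE resident on that redistributor... */
            err = its_schedule_vpe(vpe, true);
            if (err)
                    return err;

            /* ...and discard any doorbell left pending from the blocked period. */
            return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
    }

    /* ...and make it non-resident again on exit; while non-resident, pending
     * VLPIs are signalled via the doorbell (enabled when the vcpu blocks).
     */
    static int example_vpe_exit(struct kvm_vcpu *vcpu)
    {
            return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
    }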
index e54ef2fdf73dd391246c16474a3b3e652dc57300..b168a328a9e0748052d506e63df613517a7f7e17 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/list_sort.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include "vgic.h"
 
@@ -409,25 +411,56 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
        return 0;
 }
 
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
+/* @irq->irq_lock must be held */
+static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+                           unsigned int host_irq)
 {
-       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       struct irq_desc *desc;
+       struct irq_data *data;
+
+       /*
+        * Find the physical IRQ number corresponding to @host_irq
+        */
+       desc = irq_to_desc(host_irq);
+       if (!desc) {
+               kvm_err("%s: no interrupt descriptor\n", __func__);
+               return -EINVAL;
+       }
+       data = irq_desc_get_irq_data(desc);
+       while (data->parent_data)
+               data = data->parent_data;
+
+       irq->hw = true;
+       irq->host_irq = host_irq;
+       irq->hwintid = data->hwirq;
+       return 0;
+}
+
+/* @irq->irq_lock must be held */
+static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
+{
+       irq->hw = false;
+       irq->hwintid = 0;
+}
+
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+                         u32 vintid)
+{
+       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;
+       int ret;
 
        BUG_ON(!irq);
 
        spin_lock_irqsave(&irq->irq_lock, flags);
-
-       irq->hw = true;
-       irq->hwintid = phys_irq;
-
+       ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
-       return 0;
+       return ret;
 }
 
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
        struct vgic_irq *irq;
        unsigned long flags;
@@ -435,14 +468,11 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;
 
-       irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        BUG_ON(!irq);
 
        spin_lock_irqsave(&irq->irq_lock, flags);
-
-       irq->hw = false;
-       irq->hwintid = 0;
-
+       kvm_vgic_unmap_irq(irq);
        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
 
@@ -688,6 +718,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+       WARN_ON(vgic_v4_sync_hwstate(vcpu));
+
        /* An empty ap_list_head implies used_lrs == 0 */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;
@@ -700,6 +732,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 /* Flush our emulation state into the GIC hardware before entering the guest. */
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
+       WARN_ON(vgic_v4_flush_hwstate(vcpu));
+
        /*
         * If there are no virtual interrupts active or pending for this
         * VCPU, then there is no work to do and we can bail out without
@@ -751,6 +785,9 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
        if (!vcpu->kvm->arch.vgic.enabled)
                return false;
 
+       if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
+               return true;
+
        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
@@ -784,9 +821,9 @@ void vgic_kick_vcpus(struct kvm *kvm)
        }
 }
 
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        bool map_is_active;
        unsigned long flags;
 
index 4f8aecb07ae6fbf109a7a049939426c980104a4f..efbcf8f96f9c1a1bec87ce874103027c10de47ac 100644 (file)
@@ -237,4 +237,14 @@ static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
        }
 }
 
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+                        u32 devid, u32 eventid, struct vgic_irq **irq);
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
+
+bool vgic_supports_direct_msis(struct kvm *kvm);
+int vgic_v4_init(struct kvm *kvm);
+void vgic_v4_teardown(struct kvm *kvm);
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
+
 #endif
index f169ecc4f2e87f44ece32540b8428529aa01ae84..c422c10cd1dd176a973b234f742414cd2443cbff 100644 (file)
@@ -2065,6 +2065,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+void kvm_sigset_activate(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu->sigset_active)
+               return;
+
+       /*
+        * This does a lockless modification of ->real_blocked, which is fine
+        * because only current can change ->real_blocked, and all readers of
+        * ->real_blocked don't care as long as ->real_blocked is always a subset
+        * of ->blocked.
+        */
+       sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
+}
+
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu->sigset_active)
+               return;
+
+       sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
+       sigemptyset(&current->real_blocked);
+}
+
 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
        unsigned int old, val, grow;
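These helpers centralise what each architecture previously open-coded around its vcpu run loop (compare the removed sigsaved pattern in the arm hunk earlier in this diff), with the saved mask now parked in current->real_blocked instead of on the stack:

    /* Before: open-coded per architecture.
     *
     *     sigset_t sigsaved;
     *
     *     if (vcpu->sigset_active)
     *             sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
     *     ...run the guest...
     *     if (vcpu->sigset_active)
     *             sigprocmask(SIG_SETMASK, &sigsaved, NULL);
     *
     * After: common helpers, same call sites.
     *
     *     kvm_sigset_activate(vcpu);
     *     ...run the guest...
     *     kvm_sigset_deactivate(vcpu);
     */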